[thirdparty/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59
60 /* Prototypes for local functions */
61
62 static void signals_info (char *, int);
63
64 static void handle_command (char *, int);
65
66 static void sig_print_info (enum target_signal);
67
68 static void sig_print_header (void);
69
70 static void resume_cleanups (void *);
71
72 static int hook_stop_stub (void *);
73
74 static int restore_selected_frame (void *);
75
76 static int follow_fork (void);
77
78 static void set_schedlock_func (char *args, int from_tty,
79 struct cmd_list_element *c);
80
81 static int currently_stepping (struct thread_info *tp);
82
83 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
84 void *data);
85
86 static void xdb_handle_command (char *args, int from_tty);
87
88 static int prepare_to_proceed (int);
89
90 static void print_exited_reason (int exitstatus);
91
92 static void print_signal_exited_reason (enum target_signal siggnal);
93
94 static void print_no_history_reason (void);
95
96 static void print_signal_received_reason (enum target_signal siggnal);
97
98 static void print_end_stepping_range_reason (void);
99
100 void _initialize_infrun (void);
101
102 void nullify_last_target_wait_ptid (void);
103
104 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
105
106 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
107
108 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
109
110 /* When set, stop the 'step' command if we enter a function which has
111 no line number information. The normal behavior is that we step
112        over such a function.  */
113 int step_stop_if_no_debug = 0;
114 static void
115 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
119 }
120
121 /* In asynchronous mode, but simulating synchronous execution. */
122
123 int sync_execution = 0;
124
125 /* wait_for_inferior and normal_stop use this to notify the user
126 when the inferior stopped in a different thread than it had been
127 running in. */
128
129 static ptid_t previous_inferior_ptid;
130
131 /* Default behavior is to detach newly forked processes (legacy). */
132 int detach_fork = 1;
133
134 int debug_displaced = 0;
135 static void
136 show_debug_displaced (struct ui_file *file, int from_tty,
137 struct cmd_list_element *c, const char *value)
138 {
139 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
140 }
141
142 int debug_infrun = 0;
143 static void
144 show_debug_infrun (struct ui_file *file, int from_tty,
145 struct cmd_list_element *c, const char *value)
146 {
147 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
148 }
149
150
151 /* Support for disabling address space randomization. */
152
153 int disable_randomization = 1;
154
155 static void
156 show_disable_randomization (struct ui_file *file, int from_tty,
157 struct cmd_list_element *c, const char *value)
158 {
159 if (target_supports_disable_randomization ())
160 fprintf_filtered (file,
161 _("Disabling randomization of debuggee's "
162 "virtual address space is %s.\n"),
163 value);
164 else
165 fputs_filtered (_("Disabling randomization of debuggee's "
166 "virtual address space is unsupported on\n"
167 "this platform.\n"), file);
168 }
169
170 static void
171 set_disable_randomization (char *args, int from_tty,
172 struct cmd_list_element *c)
173 {
174 if (!target_supports_disable_randomization ())
175 error (_("Disabling randomization of debuggee's "
176 "virtual address space is unsupported on\n"
177 "this platform."));
178 }
179
180
181 /* If the program uses ELF-style shared libraries, then calls to
182 functions in shared libraries go through stubs, which live in a
183 table called the PLT (Procedure Linkage Table). The first time the
184 function is called, the stub sends control to the dynamic linker,
185 which looks up the function's real address, patches the stub so
186 that future calls will go directly to the function, and then passes
187 control to the function.
188
189 If we are stepping at the source level, we don't want to see any of
190 this --- we just want to skip over the stub and the dynamic linker.
191 The simple approach is to single-step until control leaves the
192 dynamic linker.
193
194 However, on some systems (e.g., Red Hat's 5.2 distribution) the
195 dynamic linker calls functions in the shared C library, so you
196 can't tell from the PC alone whether the dynamic linker is still
197 running. In this case, we use a step-resume breakpoint to get us
198 past the dynamic linker, as if we were using "next" to step over a
199 function call.
200
201 in_solib_dynsym_resolve_code() says whether we're in the dynamic
202 linker code or not. Normally, this means we single-step. However,
203    if SKIP_SOLIB_RESOLVER then returns non-zero, its value is an
204 address where we can place a step-resume breakpoint to get past the
205 linker's symbol resolution function.
206
207 in_solib_dynsym_resolve_code() can generally be implemented in a
208 pretty portable way, by comparing the PC against the address ranges
209 of the dynamic linker's sections.
210
211 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
212 it depends on internal details of the dynamic linker. It's usually
213 not too hard to figure out where to put a breakpoint, but it
214 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
215 sanity checking. If it can't figure things out, returning zero and
216 getting the (possibly confusing) stepping behavior is better than
217 signalling an error, which will obscure the change in the
218 inferior's state. */
219
220 /* This function returns TRUE if pc is the address of an instruction
221 that lies within the dynamic linker (such as the event hook, or the
222 dld itself).
223
224 This function must be used only when a dynamic linker event has
225 been caught, and the inferior is being stepped out of the hook, or
226 undefined results are guaranteed. */
227
228 #ifndef SOLIB_IN_DYNAMIC_LINKER
229 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
230 #endif
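/* The decision flow described in the comment above, reduced to a
   minimal illustrative sketch.  This is NOT GDB code and is kept out
   of the build with "#if 0"; the example_* calls are hypothetical
   stand-ins for in_solib_dynsym_resolve_code, SKIP_SOLIB_RESOLVER and
   the step machinery, used only to show how the two strategies --
   single-stepping through the resolver versus planting a step-resume
   breakpoint past it -- fit together.  */
#if 0
static void
example_step_through_solib_stub (CORE_ADDR pc)
{
  if (example_in_dynsym_resolve_code (pc))
    {
      /* Ask the system-specific hook for an address past the
         resolver.  Zero means it could not figure one out.  */
      CORE_ADDR past_resolver = example_skip_solib_resolver (pc);

      if (past_resolver != 0)
        /* Behave like "next" over a call: run to that address.  */
        example_insert_step_resume_breakpoint (past_resolver);
      else
        /* Fall back to single-stepping until control leaves the
           dynamic linker.  */
        example_single_step ();
    }
}
#endif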
231
232 /* "Observer mode" is somewhat like a more extreme version of
233 non-stop, in which all GDB operations that might affect the
234 target's execution have been disabled. */
235
236 static int non_stop_1 = 0;
237
238 int observer_mode = 0;
239 static int observer_mode_1 = 0;
240
241 static void
242 set_observer_mode (char *args, int from_tty,
243 struct cmd_list_element *c)
244 {
245 extern int pagination_enabled;
246
247 if (target_has_execution)
248 {
249 observer_mode_1 = observer_mode;
250 error (_("Cannot change this setting while the inferior is running."));
251 }
252
253 observer_mode = observer_mode_1;
254
255 may_write_registers = !observer_mode;
256 may_write_memory = !observer_mode;
257 may_insert_breakpoints = !observer_mode;
258 may_insert_tracepoints = !observer_mode;
259 /* We can insert fast tracepoints in or out of observer mode,
260 but enable them if we're going into this mode. */
261 if (observer_mode)
262 may_insert_fast_tracepoints = 1;
263 may_stop = !observer_mode;
264 update_target_permissions ();
265
266 /* Going *into* observer mode we must force non-stop, then
267 going out we leave it that way. */
268 if (observer_mode)
269 {
270 target_async_permitted = 1;
271 pagination_enabled = 0;
272 non_stop = non_stop_1 = 1;
273 }
274
275 if (from_tty)
276 printf_filtered (_("Observer mode is now %s.\n"),
277 (observer_mode ? "on" : "off"));
278 }
279
280 static void
281 show_observer_mode (struct ui_file *file, int from_tty,
282 struct cmd_list_element *c, const char *value)
283 {
284 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
285 }
286
287 /* This updates the value of observer mode based on changes in
288 permissions. Note that we are deliberately ignoring the values of
289 may-write-registers and may-write-memory, since the user may have
290 reason to enable these during a session, for instance to turn on a
291 debugging-related global. */
292
293 void
294 update_observer_mode (void)
295 {
296 int newval;
297
298 newval = (!may_insert_breakpoints
299 && !may_insert_tracepoints
300 && may_insert_fast_tracepoints
301 && !may_stop
302 && non_stop);
303
304 /* Let the user know if things change. */
305 if (newval != observer_mode)
306 printf_filtered (_("Observer mode is now %s.\n"),
307 (newval ? "on" : "off"));
308
309 observer_mode = observer_mode_1 = newval;
310 }
311
312 /* Tables of how to react to signals; the user sets them. */
313
314 static unsigned char *signal_stop;
315 static unsigned char *signal_print;
316 static unsigned char *signal_program;
317
318 /* Table of signals that the target may silently handle.
319 This is automatically determined from the flags above,
320 and simply cached here. */
321 static unsigned char *signal_pass;
322
323 #define SET_SIGS(nsigs,sigs,flags) \
324 do { \
325 int signum = (nsigs); \
326 while (signum-- > 0) \
327 if ((sigs)[signum]) \
328 (flags)[signum] = 1; \
329 } while (0)
330
331 #define UNSET_SIGS(nsigs,sigs,flags) \
332 do { \
333 int signum = (nsigs); \
334 while (signum-- > 0) \
335 if ((sigs)[signum]) \
336 (flags)[signum] = 0; \
337 } while (0)
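/* A minimal usage sketch for the two helpers above; illustrative only
   and kept out of the build.  A "handle"-style command would build an
   array SIGS flagging the signals the user named, then flip the
   matching entries in the tables declared above.  NSIGS and the
   function name are hypothetical.  */
#if 0
static void
example_quiet_signals (int nsigs, unsigned char *sigs)
{
  /* Make the flagged signals neither stop the inferior nor be
     announced when they arrive.  */
  UNSET_SIGS (nsigs, sigs, signal_stop);
  UNSET_SIGS (nsigs, sigs, signal_print);
}
#endif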
338
339 /* Value to pass to target_resume() to cause all threads to resume. */
340
341 #define RESUME_ALL minus_one_ptid
342
343 /* Command list pointer for the "stop" placeholder. */
344
345 static struct cmd_list_element *stop_command;
346
347 /* Function inferior was in as of last step command. */
348
349 static struct symbol *step_start_function;
350
351 /* Nonzero if we want to give control to the user when we're notified
352 of shared library events by the dynamic linker. */
353 int stop_on_solib_events;
354 static void
355 show_stop_on_solib_events (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
359 value);
360 }
361
362 /* Nonzero means expecting a trace trap
363 and should stop the inferior and return silently when it happens. */
364
365 int stop_after_trap;
366
367 /* Save register contents here when executing a "finish" command or when
368 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
369 Thus this contains the return value from the called function (assuming
370 values are returned in a register). */
371
372 struct regcache *stop_registers;
373
374 /* Nonzero after stop if current stack frame should be printed. */
375
376 static int stop_print_frame;
377
378 /* This is a cached copy of the pid/waitstatus of the last event
379 returned by target_wait()/deprecated_target_wait_hook(). This
380 information is returned by get_last_target_status(). */
381 static ptid_t target_last_wait_ptid;
382 static struct target_waitstatus target_last_waitstatus;
383
384 static void context_switch (ptid_t ptid);
385
386 void init_thread_stepping_state (struct thread_info *tss);
387
388 void init_infwait_state (void);
389
390 static const char follow_fork_mode_child[] = "child";
391 static const char follow_fork_mode_parent[] = "parent";
392
393 static const char *follow_fork_mode_kind_names[] = {
394 follow_fork_mode_child,
395 follow_fork_mode_parent,
396 NULL
397 };
398
399 static const char *follow_fork_mode_string = follow_fork_mode_parent;
400 static void
401 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
402 struct cmd_list_element *c, const char *value)
403 {
404 fprintf_filtered (file,
405 _("Debugger response to a program "
406 "call of fork or vfork is \"%s\".\n"),
407 value);
408 }
409 \f
410
411 /* Tell the target to follow the fork we're stopped at. Returns true
412 if the inferior should be resumed; false, if the target for some
413 reason decided it's best not to resume. */
414
415 static int
416 follow_fork (void)
417 {
418 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
419 int should_resume = 1;
420 struct thread_info *tp;
421
422 /* Copy user stepping state to the new inferior thread. FIXME: the
423 followed fork child thread should have a copy of most of the
424 parent thread structure's run control related fields, not just these.
425 Initialized to avoid "may be used uninitialized" warnings from gcc. */
426 struct breakpoint *step_resume_breakpoint = NULL;
427 struct breakpoint *exception_resume_breakpoint = NULL;
428 CORE_ADDR step_range_start = 0;
429 CORE_ADDR step_range_end = 0;
430 struct frame_id step_frame_id = { 0 };
431
432 if (!non_stop)
433 {
434 ptid_t wait_ptid;
435 struct target_waitstatus wait_status;
436
437 /* Get the last target status returned by target_wait(). */
438 get_last_target_status (&wait_ptid, &wait_status);
439
440 /* If not stopped at a fork event, then there's nothing else to
441 do. */
442 if (wait_status.kind != TARGET_WAITKIND_FORKED
443 && wait_status.kind != TARGET_WAITKIND_VFORKED)
444 return 1;
445
446 /* Check if we switched over from WAIT_PTID, since the event was
447 reported. */
448 if (!ptid_equal (wait_ptid, minus_one_ptid)
449 && !ptid_equal (inferior_ptid, wait_ptid))
450 {
451 /* We did. Switch back to WAIT_PTID thread, to tell the
452 target to follow it (in either direction). We'll
453 afterwards refuse to resume, and inform the user what
454 happened. */
455 switch_to_thread (wait_ptid);
456 should_resume = 0;
457 }
458 }
459
460 tp = inferior_thread ();
461
462 /* If there were any forks/vforks that were caught and are now to be
463 followed, then do so now. */
464 switch (tp->pending_follow.kind)
465 {
466 case TARGET_WAITKIND_FORKED:
467 case TARGET_WAITKIND_VFORKED:
468 {
469 ptid_t parent, child;
470
471 /* If the user did a next/step, etc, over a fork call,
472 preserve the stepping state in the fork child. */
473 if (follow_child && should_resume)
474 {
475 step_resume_breakpoint = clone_momentary_breakpoint
476 (tp->control.step_resume_breakpoint);
477 step_range_start = tp->control.step_range_start;
478 step_range_end = tp->control.step_range_end;
479 step_frame_id = tp->control.step_frame_id;
480 exception_resume_breakpoint
481 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
482
483 /* For now, delete the parent's sr breakpoint, otherwise,
484 parent/child sr breakpoints are considered duplicates,
485 and the child version will not be installed. Remove
486 this when the breakpoints module becomes aware of
487 inferiors and address spaces. */
488 delete_step_resume_breakpoint (tp);
489 tp->control.step_range_start = 0;
490 tp->control.step_range_end = 0;
491 tp->control.step_frame_id = null_frame_id;
492 delete_exception_resume_breakpoint (tp);
493 }
494
495 parent = inferior_ptid;
496 child = tp->pending_follow.value.related_pid;
497
498 /* Tell the target to do whatever is necessary to follow
499 either parent or child. */
500 if (target_follow_fork (follow_child))
501 {
502 /* Target refused to follow, or there's some other reason
503 we shouldn't resume. */
504 should_resume = 0;
505 }
506 else
507 {
508 /* This pending follow fork event is now handled, one way
509              or another.  The previously selected thread may be gone
510              from the lists by now, but if it is still around, we need
511 to clear the pending follow request. */
512 tp = find_thread_ptid (parent);
513 if (tp)
514 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
515
516 /* This makes sure we don't try to apply the "Switched
517 over from WAIT_PID" logic above. */
518 nullify_last_target_wait_ptid ();
519
520 /* If we followed the child, switch to it... */
521 if (follow_child)
522 {
523 switch_to_thread (child);
524
525 /* ... and preserve the stepping state, in case the
526 user was stepping over the fork call. */
527 if (should_resume)
528 {
529 tp = inferior_thread ();
530 tp->control.step_resume_breakpoint
531 = step_resume_breakpoint;
532 tp->control.step_range_start = step_range_start;
533 tp->control.step_range_end = step_range_end;
534 tp->control.step_frame_id = step_frame_id;
535 tp->control.exception_resume_breakpoint
536 = exception_resume_breakpoint;
537 }
538 else
539 {
540 /* If we get here, it was because we're trying to
541 resume from a fork catchpoint, but, the user
542 has switched threads away from the thread that
543 forked. In that case, the resume command
544 issued is most likely not applicable to the
545 child, so just warn, and refuse to resume. */
546 warning (_("Not resuming: switched threads "
547 "before following fork child.\n"));
548 }
549
550 /* Reset breakpoints in the child as appropriate. */
551 follow_inferior_reset_breakpoints ();
552 }
553 else
554 switch_to_thread (parent);
555 }
556 }
557 break;
558 case TARGET_WAITKIND_SPURIOUS:
559 /* Nothing to follow. */
560 break;
561 default:
562 internal_error (__FILE__, __LINE__,
563 "Unexpected pending_follow.kind %d\n",
564 tp->pending_follow.kind);
565 break;
566 }
567
568 return should_resume;
569 }
570
571 void
572 follow_inferior_reset_breakpoints (void)
573 {
574 struct thread_info *tp = inferior_thread ();
575
576 /* Was there a step_resume breakpoint? (There was if the user
577 did a "next" at the fork() call.) If so, explicitly reset its
578 thread number.
579
580 step_resumes are a form of bp that are made to be per-thread.
581 Since we created the step_resume bp when the parent process
582 was being debugged, and now are switching to the child process,
583 from the breakpoint package's viewpoint, that's a switch of
584 "threads". We must update the bp's notion of which thread
585 it is for, or it'll be ignored when it triggers. */
586
587 if (tp->control.step_resume_breakpoint)
588 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
589
590 if (tp->control.exception_resume_breakpoint)
591 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
592
593 /* Reinsert all breakpoints in the child. The user may have set
594 breakpoints after catching the fork, in which case those
595 were never set in the child, but only in the parent. This makes
596 sure the inserted breakpoints match the breakpoint list. */
597
598 breakpoint_re_set ();
599 insert_breakpoints ();
600 }
601
602 /* The child has exited or execed: resume threads of the parent the
603 user wanted to be executing. */
604
605 static int
606 proceed_after_vfork_done (struct thread_info *thread,
607 void *arg)
608 {
609 int pid = * (int *) arg;
610
611 if (ptid_get_pid (thread->ptid) == pid
612 && is_running (thread->ptid)
613 && !is_executing (thread->ptid)
614 && !thread->stop_requested
615 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
616 {
617 if (debug_infrun)
618 fprintf_unfiltered (gdb_stdlog,
619 "infrun: resuming vfork parent thread %s\n",
620 target_pid_to_str (thread->ptid));
621
622 switch_to_thread (thread->ptid);
623 clear_proceed_status ();
624 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
625 }
626
627 return 0;
628 }
629
630 /* Called whenever we notice an exec or exit event, to handle
631 detaching or resuming a vfork parent. */
632
633 static void
634 handle_vfork_child_exec_or_exit (int exec)
635 {
636 struct inferior *inf = current_inferior ();
637
638 if (inf->vfork_parent)
639 {
640 int resume_parent = -1;
641
642 /* This exec or exit marks the end of the shared memory region
643 between the parent and the child. If the user wanted to
644 detach from the parent, now is the time. */
645
646 if (inf->vfork_parent->pending_detach)
647 {
648 struct thread_info *tp;
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651 struct address_space *aspace;
652
653 /* follow-fork child, detach-on-fork on. */
654
655 old_chain = make_cleanup_restore_current_thread ();
656
657 /* We're letting loose of the parent. */
658 tp = any_live_thread_of_process (inf->vfork_parent->pid);
659 switch_to_thread (tp->ptid);
660
661 /* We're about to detach from the parent, which implicitly
662 removes breakpoints from its address space. There's a
663 catch here: we want to reuse the spaces for the child,
664 but, parent/child are still sharing the pspace at this
665 point, although the exec in reality makes the kernel give
666 the child a fresh set of new pages. The problem here is
667          that the breakpoints module, being unaware of this, would
668          likely choose the child process to write to the parent
669 address space. Swapping the child temporarily away from
670 the spaces has the desired effect. Yes, this is "sort
671 of" a hack. */
672
673 pspace = inf->pspace;
674 aspace = inf->aspace;
675 inf->aspace = NULL;
676 inf->pspace = NULL;
677
678 if (debug_infrun || info_verbose)
679 {
680 target_terminal_ours ();
681
682 if (exec)
683 fprintf_filtered (gdb_stdlog,
684 "Detaching vfork parent process "
685 "%d after child exec.\n",
686 inf->vfork_parent->pid);
687 else
688 fprintf_filtered (gdb_stdlog,
689 "Detaching vfork parent process "
690 "%d after child exit.\n",
691 inf->vfork_parent->pid);
692 }
693
694 target_detach (NULL, 0);
695
696 /* Put it back. */
697 inf->pspace = pspace;
698 inf->aspace = aspace;
699
700 do_cleanups (old_chain);
701 }
702 else if (exec)
703 {
704 /* We're staying attached to the parent, so, really give the
705 child a new address space. */
706 inf->pspace = add_program_space (maybe_new_address_space ());
707 inf->aspace = inf->pspace->aspace;
708 inf->removable = 1;
709 set_current_program_space (inf->pspace);
710
711 resume_parent = inf->vfork_parent->pid;
712
713 /* Break the bonds. */
714 inf->vfork_parent->vfork_child = NULL;
715 }
716 else
717 {
718 struct cleanup *old_chain;
719 struct program_space *pspace;
720
721 /* If this is a vfork child exiting, then the pspace and
722 aspaces were shared with the parent. Since we're
723 reporting the process exit, we'll be mourning all that is
724 found in the address space, and switching to null_ptid,
725 preparing to start a new inferior. But, since we don't
726 want to clobber the parent's address/program spaces, we
727 go ahead and create a new one for this exiting
728 inferior. */
729
730 /* Switch to null_ptid, so that clone_program_space doesn't want
731 to read the selected frame of a dead process. */
732 old_chain = save_inferior_ptid ();
733 inferior_ptid = null_ptid;
734
735 /* This inferior is dead, so avoid giving the breakpoints
736 module the option to write through to it (cloning a
737 program space resets breakpoints). */
738 inf->aspace = NULL;
739 inf->pspace = NULL;
740 pspace = add_program_space (maybe_new_address_space ());
741 set_current_program_space (pspace);
742 inf->removable = 1;
743 clone_program_space (pspace, inf->vfork_parent->pspace);
744 inf->pspace = pspace;
745 inf->aspace = pspace->aspace;
746
747 /* Put back inferior_ptid. We'll continue mourning this
748 inferior. */
749 do_cleanups (old_chain);
750
751 resume_parent = inf->vfork_parent->pid;
752 /* Break the bonds. */
753 inf->vfork_parent->vfork_child = NULL;
754 }
755
756 inf->vfork_parent = NULL;
757
758 gdb_assert (current_program_space == inf->pspace);
759
760 if (non_stop && resume_parent != -1)
761 {
762 /* If the user wanted the parent to be running, let it go
763 free now. */
764 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
765
766 if (debug_infrun)
767 fprintf_unfiltered (gdb_stdlog,
768 "infrun: resuming vfork parent process %d\n",
769 resume_parent);
770
771 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
772
773 do_cleanups (old_chain);
774 }
775 }
776 }
777
778 /* Enum strings for "set|show follow-exec-mode".  */
779
780 static const char follow_exec_mode_new[] = "new";
781 static const char follow_exec_mode_same[] = "same";
782 static const char *follow_exec_mode_names[] =
783 {
784 follow_exec_mode_new,
785 follow_exec_mode_same,
786 NULL,
787 };
788
789 static const char *follow_exec_mode_string = follow_exec_mode_same;
790 static void
791 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
792 struct cmd_list_element *c, const char *value)
793 {
794 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
795 }
796
797 /* EXECD_PATHNAME is assumed to be non-NULL. */
798
799 static void
800 follow_exec (ptid_t pid, char *execd_pathname)
801 {
802 struct thread_info *th = inferior_thread ();
803 struct inferior *inf = current_inferior ();
804
805 /* This is an exec event that we actually wish to pay attention to.
806 Refresh our symbol table to the newly exec'd program, remove any
807 momentary bp's, etc.
808
809 If there are breakpoints, they aren't really inserted now,
810 since the exec() transformed our inferior into a fresh set
811 of instructions.
812
813 We want to preserve symbolic breakpoints on the list, since
814 we have hopes that they can be reset after the new a.out's
815 symbol table is read.
816
817 However, any "raw" breakpoints must be removed from the list
818 (e.g., the solib bp's), since their address is probably invalid
819 now.
820
821 And, we DON'T want to call delete_breakpoints() here, since
822 that may write the bp's "shadow contents" (the instruction
823      value that was overwritten with a TRAP instruction).  Since
824 we now have a new a.out, those shadow contents aren't valid. */
825
826 mark_breakpoints_out ();
827
828 update_breakpoints_after_exec ();
829
830 /* If there was one, it's gone now. We cannot truly step-to-next
831 statement through an exec(). */
832 th->control.step_resume_breakpoint = NULL;
833 th->control.exception_resume_breakpoint = NULL;
834 th->control.step_range_start = 0;
835 th->control.step_range_end = 0;
836
837 /* The target reports the exec event to the main thread, even if
838 some other thread does the exec, and even if the main thread was
839 already stopped --- if debugging in non-stop mode, it's possible
840 the user had the main thread held stopped in the previous image
841 --- release it now. This is the same behavior as step-over-exec
842 with scheduler-locking on in all-stop mode. */
843 th->stop_requested = 0;
844
845 /* What is this a.out's name? */
846 printf_unfiltered (_("%s is executing new program: %s\n"),
847 target_pid_to_str (inferior_ptid),
848 execd_pathname);
849
850 /* We've followed the inferior through an exec. Therefore, the
851 inferior has essentially been killed & reborn. */
852
853 gdb_flush (gdb_stdout);
854
855 breakpoint_init_inferior (inf_execd);
856
857 if (gdb_sysroot && *gdb_sysroot)
858 {
859 char *name = alloca (strlen (gdb_sysroot)
860 + strlen (execd_pathname)
861 + 1);
862
863 strcpy (name, gdb_sysroot);
864 strcat (name, execd_pathname);
865 execd_pathname = name;
866 }
867
868 /* Reset the shared library package. This ensures that we get a
869 shlib event when the child reaches "_start", at which point the
870 dld will have had a chance to initialize the child. */
871 /* Also, loading a symbol file below may trigger symbol lookups, and
872 we don't want those to be satisfied by the libraries of the
873 previous incarnation of this process. */
874 no_shared_libraries (NULL, 0);
875
876 if (follow_exec_mode_string == follow_exec_mode_new)
877 {
878 struct program_space *pspace;
879
880 /* The user wants to keep the old inferior and program spaces
881 around. Create a new fresh one, and switch to it. */
882
883 inf = add_inferior (current_inferior ()->pid);
884 pspace = add_program_space (maybe_new_address_space ());
885 inf->pspace = pspace;
886 inf->aspace = pspace->aspace;
887
888 exit_inferior_num_silent (current_inferior ()->num);
889
890 set_current_inferior (inf);
891 set_current_program_space (pspace);
892 }
893
894 gdb_assert (current_program_space == inf->pspace);
895
896 /* That a.out is now the one to use. */
897 exec_file_attach (execd_pathname, 0);
898
899   /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for a
900      PIE (Position Independent Executable) main symbol file will only be
901      applied by solib_create_inferior_hook below; breakpoint_re_set would
902      otherwise fail to insert the breakpoints with the zero displacement.  */
903
904 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
905 NULL, 0);
906
907 set_initial_language ();
908
909 #ifdef SOLIB_CREATE_INFERIOR_HOOK
910 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
911 #else
912 solib_create_inferior_hook (0);
913 #endif
914
915 jit_inferior_created_hook ();
916
917 breakpoint_re_set ();
918
919 /* Reinsert all breakpoints. (Those which were symbolic have
920 been reset to the proper address in the new a.out, thanks
921 to symbol_file_command...). */
922 insert_breakpoints ();
923
924 /* The next resume of this inferior should bring it to the shlib
925 startup breakpoints. (If the user had also set bp's on
926 "main" from the old (parent) process, then they'll auto-
927 matically get reset there in the new process.). */
928 }
929
930 /* Non-zero if we are just simulating a single-step.  This is needed
931 because we cannot remove the breakpoints in the inferior process
932 until after the `wait' in `wait_for_inferior'. */
933 static int singlestep_breakpoints_inserted_p = 0;
934
935 /* The thread we inserted single-step breakpoints for. */
936 static ptid_t singlestep_ptid;
937
938 /* PC when we started this single-step. */
939 static CORE_ADDR singlestep_pc;
940
941 /* If another thread hit the singlestep breakpoint, we save the original
942 thread here so that we can resume single-stepping it later. */
943 static ptid_t saved_singlestep_ptid;
944 static int stepping_past_singlestep_breakpoint;
945
946 /* If not equal to null_ptid, this means that after stepping over a breakpoint
947    is finished, we need to switch to deferred_step_ptid, and step it.
948
949 The use case is when one thread has hit a breakpoint, and then the user
950 has switched to another thread and issued 'step'. We need to step over
951    the breakpoint in the thread which hit it, but then continue
952    stepping the thread the user has selected.  */
953 static ptid_t deferred_step_ptid;
954 \f
955 /* Displaced stepping. */
956
957 /* In non-stop debugging mode, we must take special care to manage
958 breakpoints properly; in particular, the traditional strategy for
959 stepping a thread past a breakpoint it has hit is unsuitable.
960 'Displaced stepping' is a tactic for stepping one thread past a
961 breakpoint it has hit while ensuring that other threads running
962 concurrently will hit the breakpoint as they should.
963
964 The traditional way to step a thread T off a breakpoint in a
965 multi-threaded program in all-stop mode is as follows:
966
967 a0) Initially, all threads are stopped, and breakpoints are not
968 inserted.
969 a1) We single-step T, leaving breakpoints uninserted.
970 a2) We insert breakpoints, and resume all threads.
971
972 In non-stop debugging, however, this strategy is unsuitable: we
973 don't want to have to stop all threads in the system in order to
974 continue or step T past a breakpoint. Instead, we use displaced
975 stepping:
976
977 n0) Initially, T is stopped, other threads are running, and
978 breakpoints are inserted.
979 n1) We copy the instruction "under" the breakpoint to a separate
980 location, outside the main code stream, making any adjustments
981 to the instruction, register, and memory state as directed by
982 T's architecture.
983 n2) We single-step T over the instruction at its new location.
984 n3) We adjust the resulting register and memory state as directed
985 by T's architecture. This includes resetting T's PC to point
986 back into the main instruction stream.
987 n4) We resume T.
988
989 This approach depends on the following gdbarch methods:
990
991 - gdbarch_max_insn_length and gdbarch_displaced_step_location
992 indicate where to copy the instruction, and how much space must
993 be reserved there. We use these in step n1.
994
995    - gdbarch_displaced_step_copy_insn copies an instruction to a new
996 address, and makes any necessary adjustments to the instruction,
997 register contents, and memory. We use this in step n1.
998
999 - gdbarch_displaced_step_fixup adjusts registers and memory after
1000      we have successfully single-stepped the instruction, to yield the
1001 same effect the instruction would have had if we had executed it
1002 at its original address. We use this in step n3.
1003
1004 - gdbarch_displaced_step_free_closure provides cleanup.
1005
1006 The gdbarch_displaced_step_copy_insn and
1007 gdbarch_displaced_step_fixup functions must be written so that
1008 copying an instruction with gdbarch_displaced_step_copy_insn,
1009 single-stepping across the copied instruction, and then applying
1010    gdbarch_displaced_step_fixup should have the same effects on the
1011 thread's memory and registers as stepping the instruction in place
1012 would have. Exactly which responsibilities fall to the copy and
1013 which fall to the fixup is up to the author of those functions.
1014
1015 See the comments in gdbarch.sh for details.
1016
1017 Note that displaced stepping and software single-step cannot
1018 currently be used in combination, although with some care I think
1019 they could be made to. Software single-step works by placing
1020 breakpoints on all possible subsequent instructions; if the
1021 displaced instruction is a PC-relative jump, those breakpoints
1022 could fall in very strange places --- on pages that aren't
1023 executable, or at addresses that are not proper instruction
1024 boundaries. (We do generally let other threads run while we wait
1025 to hit the software single-step breakpoint, and they might
1026 encounter such a corrupted instruction.) One way to work around
1027 this would be to have gdbarch_displaced_step_copy_insn fully
1028 simulate the effect of PC-relative instructions (and return NULL)
1029 on architectures that use software single-stepping.
1030
1031 In non-stop mode, we can have independent and simultaneous step
1032 requests, so more than one thread may need to simultaneously step
1033 over a breakpoint. The current implementation assumes there is
1034 only one scratch space per process. In this case, we have to
1035 serialize access to the scratch space. If thread A wants to step
1036 over a breakpoint, but we are currently waiting for some other
1037 thread to complete a displaced step, we leave thread A stopped and
1038 place it in the displaced_step_request_queue. Whenever a displaced
1039 step finishes, we pick the next thread in the queue and start a new
1040 displaced step operation on it. See displaced_step_prepare and
1041 displaced_step_fixup for details. */
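/* The n0-n4 sequence above, reduced to an illustrative sketch.  This
   is not GDB code (it is kept out of the build); the example_* calls
   are hypothetical stand-ins for the gdbarch methods and target
   operations named in the comment.  The one piece of real logic it
   shows is the generic PC relocation used when the fixup has nothing
   smarter to do: translate the stopped PC from the scratch copy back
   into the original instruction stream.  */
#if 0
static void
example_displaced_step (struct regcache *regcache)
{
  CORE_ADDR original, copy, pc;

  original = regcache_read_pc (regcache);
  copy = example_scratch_location ();		/* n1: where to put the copy.  */
  example_copy_insn (original, copy, regcache);	/* n1: copy and adjust.  */

  regcache_write_pc (regcache, copy);
  example_single_step ();			/* n2: step the copied insn.  */

  /* n3: fix up state; generic PC relocation shown here.  */
  pc = regcache_read_pc (regcache);
  regcache_write_pc (regcache, original + (pc - copy));

  example_resume ();				/* n4: let the thread run again.  */
}
#endif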
1042
1043 struct displaced_step_request
1044 {
1045 ptid_t ptid;
1046 struct displaced_step_request *next;
1047 };
1048
1049 /* Per-inferior displaced stepping state. */
1050 struct displaced_step_inferior_state
1051 {
1052 /* Pointer to next in linked list. */
1053 struct displaced_step_inferior_state *next;
1054
1055 /* The process this displaced step state refers to. */
1056 int pid;
1057
1058 /* A queue of pending displaced stepping requests. One entry per
1059 thread that needs to do a displaced step. */
1060 struct displaced_step_request *step_request_queue;
1061
1062 /* If this is not null_ptid, this is the thread carrying out a
1063 displaced single-step in process PID. This thread's state will
1064 require fixing up once it has completed its step. */
1065 ptid_t step_ptid;
1066
1067 /* The architecture the thread had when we stepped it. */
1068 struct gdbarch *step_gdbarch;
1069
1070   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1071 for post-step cleanup. */
1072 struct displaced_step_closure *step_closure;
1073
1074 /* The address of the original instruction, and the copy we
1075 made. */
1076 CORE_ADDR step_original, step_copy;
1077
1078 /* Saved contents of copy area. */
1079 gdb_byte *step_saved_copy;
1080 };
1081
1082 /* The list of states of processes involved in displaced stepping
1083 presently. */
1084 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1085
1086 /* Get the displaced stepping state of process PID. */
1087
1088 static struct displaced_step_inferior_state *
1089 get_displaced_stepping_state (int pid)
1090 {
1091 struct displaced_step_inferior_state *state;
1092
1093 for (state = displaced_step_inferior_states;
1094 state != NULL;
1095 state = state->next)
1096 if (state->pid == pid)
1097 return state;
1098
1099 return NULL;
1100 }
1101
1102 /* Add a new displaced stepping state for process PID to the displaced
1103 stepping state list, or return a pointer to an already existing
1104 entry, if it already exists. Never returns NULL. */
1105
1106 static struct displaced_step_inferior_state *
1107 add_displaced_stepping_state (int pid)
1108 {
1109 struct displaced_step_inferior_state *state;
1110
1111 for (state = displaced_step_inferior_states;
1112 state != NULL;
1113 state = state->next)
1114 if (state->pid == pid)
1115 return state;
1116
1117 state = xcalloc (1, sizeof (*state));
1118 state->pid = pid;
1119 state->next = displaced_step_inferior_states;
1120 displaced_step_inferior_states = state;
1121
1122 return state;
1123 }
1124
1125 /* If the inferior is in displaced stepping, and ADDR equals the starting
1126    address of the copy area, return the corresponding displaced_step_closure.  Otherwise,
1127 return NULL. */
1128
1129 struct displaced_step_closure*
1130 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1131 {
1132 struct displaced_step_inferior_state *displaced
1133 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1134
1135   /* The caller may be checking the mode of the displaced instruction in the copy area.  */
1136 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1137 && (displaced->step_copy == addr))
1138 return displaced->step_closure;
1139
1140 return NULL;
1141 }
1142
1143 /* Remove the displaced stepping state of process PID. */
1144
1145 static void
1146 remove_displaced_stepping_state (int pid)
1147 {
1148 struct displaced_step_inferior_state *it, **prev_next_p;
1149
1150 gdb_assert (pid != 0);
1151
1152 it = displaced_step_inferior_states;
1153 prev_next_p = &displaced_step_inferior_states;
1154 while (it)
1155 {
1156 if (it->pid == pid)
1157 {
1158 *prev_next_p = it->next;
1159 xfree (it);
1160 return;
1161 }
1162
1163 prev_next_p = &it->next;
1164 it = *prev_next_p;
1165 }
1166 }
1167
1168 static void
1169 infrun_inferior_exit (struct inferior *inf)
1170 {
1171 remove_displaced_stepping_state (inf->pid);
1172 }
1173
1174 /* Enum strings for "set|show displaced-stepping". */
1175
1176 static const char can_use_displaced_stepping_auto[] = "auto";
1177 static const char can_use_displaced_stepping_on[] = "on";
1178 static const char can_use_displaced_stepping_off[] = "off";
1179 static const char *can_use_displaced_stepping_enum[] =
1180 {
1181 can_use_displaced_stepping_auto,
1182 can_use_displaced_stepping_on,
1183 can_use_displaced_stepping_off,
1184 NULL,
1185 };
1186
1187 /* If ON, and the architecture supports it, GDB will use displaced
1188 stepping to step over breakpoints. If OFF, or if the architecture
1189 doesn't support it, GDB will instead use the traditional
1190 hold-and-step approach. If AUTO (which is the default), GDB will
1191 decide which technique to use to step over breakpoints depending on
1192 which of all-stop or non-stop mode is active --- displaced stepping
1193 in non-stop mode; hold-and-step in all-stop mode. */
1194
1195 static const char *can_use_displaced_stepping =
1196 can_use_displaced_stepping_auto;
1197
1198 static void
1199 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1200 struct cmd_list_element *c,
1201 const char *value)
1202 {
1203 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1204 fprintf_filtered (file,
1205 _("Debugger's willingness to use displaced stepping "
1206 "to step over breakpoints is %s (currently %s).\n"),
1207 value, non_stop ? "on" : "off");
1208 else
1209 fprintf_filtered (file,
1210 _("Debugger's willingness to use displaced stepping "
1211 "to step over breakpoints is %s.\n"), value);
1212 }
1213
1214 /* Return non-zero if displaced stepping can/should be used to step
1215 over breakpoints. */
1216
1217 static int
1218 use_displaced_stepping (struct gdbarch *gdbarch)
1219 {
1220 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1221 && non_stop)
1222 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1223 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1224 && !RECORD_IS_USED);
1225 }
1226
1227 /* Clean out any stray displaced stepping state. */
1228 static void
1229 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1230 {
1231 /* Indicate that there is no cleanup pending. */
1232 displaced->step_ptid = null_ptid;
1233
1234 if (displaced->step_closure)
1235 {
1236 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1237 displaced->step_closure);
1238 displaced->step_closure = NULL;
1239 }
1240 }
1241
1242 static void
1243 displaced_step_clear_cleanup (void *arg)
1244 {
1245 struct displaced_step_inferior_state *state = arg;
1246
1247 displaced_step_clear (state);
1248 }
1249
1250 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1251 void
1252 displaced_step_dump_bytes (struct ui_file *file,
1253 const gdb_byte *buf,
1254 size_t len)
1255 {
1256 int i;
1257
1258 for (i = 0; i < len; i++)
1259 fprintf_unfiltered (file, "%02x ", buf[i]);
1260 fputs_unfiltered ("\n", file);
1261 }
1262
1263 /* Prepare to single-step, using displaced stepping.
1264
1265 Note that we cannot use displaced stepping when we have a signal to
1266 deliver. If we have a signal to deliver and an instruction to step
1267 over, then after the step, there will be no indication from the
1268 target whether the thread entered a signal handler or ignored the
1269 signal and stepped over the instruction successfully --- both cases
1270 result in a simple SIGTRAP. In the first case we mustn't do a
1271 fixup, and in the second case we must --- but we can't tell which.
1272 Comments in the code for 'random signals' in handle_inferior_event
1273 explain how we handle this case instead.
1274
1275 Returns 1 if preparing was successful -- this thread is going to be
1276 stepped now; or 0 if displaced stepping this thread got queued. */
1277 static int
1278 displaced_step_prepare (ptid_t ptid)
1279 {
1280 struct cleanup *old_cleanups, *ignore_cleanups;
1281 struct regcache *regcache = get_thread_regcache (ptid);
1282 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1283 CORE_ADDR original, copy;
1284 ULONGEST len;
1285 struct displaced_step_closure *closure;
1286 struct displaced_step_inferior_state *displaced;
1287
1288 /* We should never reach this function if the architecture does not
1289 support displaced stepping. */
1290 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1291
1292 /* We have to displaced step one thread at a time, as we only have
1293 access to a single scratch space per inferior. */
1294
1295 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1296
1297 if (!ptid_equal (displaced->step_ptid, null_ptid))
1298 {
1299 /* Already waiting for a displaced step to finish. Defer this
1300 request and place in queue. */
1301 struct displaced_step_request *req, *new_req;
1302
1303 if (debug_displaced)
1304 fprintf_unfiltered (gdb_stdlog,
1305 "displaced: defering step of %s\n",
1306 target_pid_to_str (ptid));
1307
1308 new_req = xmalloc (sizeof (*new_req));
1309 new_req->ptid = ptid;
1310 new_req->next = NULL;
1311
1312 if (displaced->step_request_queue)
1313 {
1314 for (req = displaced->step_request_queue;
1315 req && req->next;
1316 req = req->next)
1317 ;
1318 req->next = new_req;
1319 }
1320 else
1321 displaced->step_request_queue = new_req;
1322
1323 return 0;
1324 }
1325 else
1326 {
1327 if (debug_displaced)
1328 fprintf_unfiltered (gdb_stdlog,
1329 "displaced: stepping %s now\n",
1330 target_pid_to_str (ptid));
1331 }
1332
1333 displaced_step_clear (displaced);
1334
1335 old_cleanups = save_inferior_ptid ();
1336 inferior_ptid = ptid;
1337
1338 original = regcache_read_pc (regcache);
1339
1340 copy = gdbarch_displaced_step_location (gdbarch);
1341 len = gdbarch_max_insn_length (gdbarch);
1342
1343 /* Save the original contents of the copy area. */
1344 displaced->step_saved_copy = xmalloc (len);
1345 ignore_cleanups = make_cleanup (free_current_contents,
1346 &displaced->step_saved_copy);
1347 read_memory (copy, displaced->step_saved_copy, len);
1348 if (debug_displaced)
1349 {
1350 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1351 paddress (gdbarch, copy));
1352 displaced_step_dump_bytes (gdb_stdlog,
1353 displaced->step_saved_copy,
1354 len);
1355 };
1356
1357 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1358 original, copy, regcache);
1359
1360 /* We don't support the fully-simulated case at present. */
1361 gdb_assert (closure);
1362
1363 /* Save the information we need to fix things up if the step
1364 succeeds. */
1365 displaced->step_ptid = ptid;
1366 displaced->step_gdbarch = gdbarch;
1367 displaced->step_closure = closure;
1368 displaced->step_original = original;
1369 displaced->step_copy = copy;
1370
1371 make_cleanup (displaced_step_clear_cleanup, displaced);
1372
1373 /* Resume execution at the copy. */
1374 regcache_write_pc (regcache, copy);
1375
1376 discard_cleanups (ignore_cleanups);
1377
1378 do_cleanups (old_cleanups);
1379
1380 if (debug_displaced)
1381 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1382 paddress (gdbarch, copy));
1383
1384 return 1;
1385 }
1386
1387 static void
1388 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1389 const gdb_byte *myaddr, int len)
1390 {
1391 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1392
1393 inferior_ptid = ptid;
1394 write_memory (memaddr, myaddr, len);
1395 do_cleanups (ptid_cleanup);
1396 }
1397
1398 /* Restore the contents of the copy area for thread PTID. */
1399
1400 static void
1401 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1402 ptid_t ptid)
1403 {
1404 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1405
1406 write_memory_ptid (ptid, displaced->step_copy,
1407 displaced->step_saved_copy, len);
1408 if (debug_displaced)
1409 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1410 target_pid_to_str (ptid),
1411 paddress (displaced->step_gdbarch,
1412 displaced->step_copy));
1413 }
1414
1415 static void
1416 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1417 {
1418 struct cleanup *old_cleanups;
1419 struct displaced_step_inferior_state *displaced
1420 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1421
1422 /* Was any thread of this process doing a displaced step? */
1423 if (displaced == NULL)
1424 return;
1425
1426 /* Was this event for the pid we displaced? */
1427 if (ptid_equal (displaced->step_ptid, null_ptid)
1428 || ! ptid_equal (displaced->step_ptid, event_ptid))
1429 return;
1430
1431 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1432
1433 displaced_step_restore (displaced, displaced->step_ptid);
1434
1435 /* Did the instruction complete successfully? */
1436 if (signal == TARGET_SIGNAL_TRAP)
1437 {
1438 /* Fix up the resulting state. */
1439 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1440 displaced->step_closure,
1441 displaced->step_original,
1442 displaced->step_copy,
1443 get_thread_regcache (displaced->step_ptid));
1444 }
1445 else
1446 {
1447 /* Since the instruction didn't complete, all we can do is
1448 relocate the PC. */
1449 struct regcache *regcache = get_thread_regcache (event_ptid);
1450 CORE_ADDR pc = regcache_read_pc (regcache);
1451
1452 pc = displaced->step_original + (pc - displaced->step_copy);
1453 regcache_write_pc (regcache, pc);
1454 }
1455
1456 do_cleanups (old_cleanups);
1457
1458 displaced->step_ptid = null_ptid;
1459
1460 /* Are there any pending displaced stepping requests? If so, run
1461 one now. Leave the state object around, since we're likely to
1462 need it again soon. */
1463 while (displaced->step_request_queue)
1464 {
1465 struct displaced_step_request *head;
1466 ptid_t ptid;
1467 struct regcache *regcache;
1468 struct gdbarch *gdbarch;
1469 CORE_ADDR actual_pc;
1470 struct address_space *aspace;
1471
1472 head = displaced->step_request_queue;
1473 ptid = head->ptid;
1474 displaced->step_request_queue = head->next;
1475 xfree (head);
1476
1477 context_switch (ptid);
1478
1479 regcache = get_thread_regcache (ptid);
1480 actual_pc = regcache_read_pc (regcache);
1481 aspace = get_regcache_aspace (regcache);
1482
1483 if (breakpoint_here_p (aspace, actual_pc))
1484 {
1485 if (debug_displaced)
1486 fprintf_unfiltered (gdb_stdlog,
1487 "displaced: stepping queued %s now\n",
1488 target_pid_to_str (ptid));
1489
1490 displaced_step_prepare (ptid);
1491
1492 gdbarch = get_regcache_arch (regcache);
1493
1494 if (debug_displaced)
1495 {
1496 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1497 gdb_byte buf[4];
1498
1499 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1500 paddress (gdbarch, actual_pc));
1501 read_memory (actual_pc, buf, sizeof (buf));
1502 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1503 }
1504
1505 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1506 displaced->step_closure))
1507 target_resume (ptid, 1, TARGET_SIGNAL_0);
1508 else
1509 target_resume (ptid, 0, TARGET_SIGNAL_0);
1510
1511 /* Done, we're stepping a thread. */
1512 break;
1513 }
1514 else
1515 {
1516 int step;
1517 struct thread_info *tp = inferior_thread ();
1518
1519 /* The breakpoint we were sitting under has since been
1520 removed. */
1521 tp->control.trap_expected = 0;
1522
1523 /* Go back to what we were trying to do. */
1524 step = currently_stepping (tp);
1525
1526 if (debug_displaced)
1527 fprintf_unfiltered (gdb_stdlog,
1528 "breakpoint is gone %s: step(%d)\n",
1529 target_pid_to_str (tp->ptid), step);
1530
1531 target_resume (ptid, step, TARGET_SIGNAL_0);
1532 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1533
1534 /* This request was discarded. See if there's any other
1535 thread waiting for its turn. */
1536 }
1537 }
1538 }
1539
1540 /* Update global variables holding ptids to hold NEW_PTID if they were
1541 holding OLD_PTID. */
1542 static void
1543 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1544 {
1545 struct displaced_step_request *it;
1546 struct displaced_step_inferior_state *displaced;
1547
1548 if (ptid_equal (inferior_ptid, old_ptid))
1549 inferior_ptid = new_ptid;
1550
1551 if (ptid_equal (singlestep_ptid, old_ptid))
1552 singlestep_ptid = new_ptid;
1553
1554 if (ptid_equal (deferred_step_ptid, old_ptid))
1555 deferred_step_ptid = new_ptid;
1556
1557 for (displaced = displaced_step_inferior_states;
1558 displaced;
1559 displaced = displaced->next)
1560 {
1561 if (ptid_equal (displaced->step_ptid, old_ptid))
1562 displaced->step_ptid = new_ptid;
1563
1564 for (it = displaced->step_request_queue; it; it = it->next)
1565 if (ptid_equal (it->ptid, old_ptid))
1566 it->ptid = new_ptid;
1567 }
1568 }
1569
1570 \f
1571 /* Resuming. */
1572
1573 /* Things to clean up if we QUIT out of resume (). */
1574 static void
1575 resume_cleanups (void *ignore)
1576 {
1577 normal_stop ();
1578 }
1579
1580 static const char schedlock_off[] = "off";
1581 static const char schedlock_on[] = "on";
1582 static const char schedlock_step[] = "step";
1583 static const char *scheduler_enums[] = {
1584 schedlock_off,
1585 schedlock_on,
1586 schedlock_step,
1587 NULL
1588 };
1589 static const char *scheduler_mode = schedlock_off;
1590 static void
1591 show_scheduler_mode (struct ui_file *file, int from_tty,
1592 struct cmd_list_element *c, const char *value)
1593 {
1594 fprintf_filtered (file,
1595 _("Mode for locking scheduler "
1596 "during execution is \"%s\".\n"),
1597 value);
1598 }
1599
1600 static void
1601 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1602 {
1603 if (!target_can_lock_scheduler)
1604 {
1605 scheduler_mode = schedlock_off;
1606 error (_("Target '%s' cannot support this command."), target_shortname);
1607 }
1608 }
1609
1610 /* True if execution commands resume all threads of all processes by
1611 default; otherwise, resume only threads of the current inferior
1612 process. */
1613 int sched_multi = 0;
1614
1615 /* Try to set up for software single stepping over the specified location.
1616 Return 1 if target_resume() should use hardware single step.
1617
1618 GDBARCH the current gdbarch.
1619 PC the location to step over. */
1620
1621 static int
1622 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1623 {
1624 int hw_step = 1;
1625
1626 if (execution_direction == EXEC_FORWARD
1627 && gdbarch_software_single_step_p (gdbarch)
1628 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1629 {
1630 hw_step = 0;
1631 /* Do not pull these breakpoints until after a `wait' in
1632 `wait_for_inferior'. */
1633 singlestep_breakpoints_inserted_p = 1;
1634 singlestep_ptid = inferior_ptid;
1635 singlestep_pc = pc;
1636 }
1637 return hw_step;
1638 }
1639
1640 /* Return a ptid representing the set of threads that we will proceed,
1641    from the perspective of the user/frontend.  We may actually resume
1642 fewer threads at first, e.g., if a thread is stopped at a
1643 breakpoint that needs stepping-off, but that should not be visible
1644 to the user/frontend, and neither should the frontend/user be
1645 allowed to proceed any of the threads that happen to be stopped for
1646 internal run control handling, if a previous command wanted them
1647 resumed. */
1648
1649 ptid_t
1650 user_visible_resume_ptid (int step)
1651 {
1652 /* By default, resume all threads of all processes. */
1653 ptid_t resume_ptid = RESUME_ALL;
1654
1655 /* Maybe resume only all threads of the current process. */
1656 if (!sched_multi && target_supports_multi_process ())
1657 {
1658 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1659 }
1660
1661 /* Maybe resume a single thread after all. */
1662 if (non_stop)
1663 {
1664 /* With non-stop mode on, threads are always handled
1665 individually. */
1666 resume_ptid = inferior_ptid;
1667 }
1668 else if ((scheduler_mode == schedlock_on)
1669 || (scheduler_mode == schedlock_step
1670 && (step || singlestep_breakpoints_inserted_p)))
1671 {
1672 /* User-settable 'scheduler' mode requires solo thread resume. */
1673 resume_ptid = inferior_ptid;
1674 }
1675
1676 return resume_ptid;
1677 }
1678
1679 /* Resume the inferior, but allow a QUIT. This is useful if the user
1680 wants to interrupt some lengthy single-stepping operation
1681 (for child processes, the SIGINT goes to the inferior, and so
1682 we get a SIGINT random_signal, but for remote debugging and perhaps
1683 other targets, that's not true).
1684
1685 STEP nonzero if we should step (zero to continue instead).
1686 SIG is the signal to give the inferior (zero for none). */
1687 void
1688 resume (int step, enum target_signal sig)
1689 {
1690 int should_resume = 1;
1691 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1692 struct regcache *regcache = get_current_regcache ();
1693 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1694 struct thread_info *tp = inferior_thread ();
1695 CORE_ADDR pc = regcache_read_pc (regcache);
1696 struct address_space *aspace = get_regcache_aspace (regcache);
1697
1698 QUIT;
1699
1700 if (current_inferior ()->waiting_for_vfork_done)
1701 {
1702 /* Don't try to single-step a vfork parent that is waiting for
1703 the child to get out of the shared memory region (by exec'ing
1704 or exiting). This is particularly important on software
1705 single-step archs, as the child process would trip on the
1706 software single step breakpoint inserted for the parent
1707 process. Since the parent will not actually execute any
1708 instruction until the child is out of the shared region (such
1709 are vfork's semantics), it is safe to simply continue it.
1710 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1711 the parent, and tell it to `keep_going', which automatically
1712 re-sets it to stepping. */
1713 if (debug_infrun)
1714 fprintf_unfiltered (gdb_stdlog,
1715 "infrun: resume : clear step\n");
1716 step = 0;
1717 }
1718
1719 if (debug_infrun)
1720 fprintf_unfiltered (gdb_stdlog,
1721 "infrun: resume (step=%d, signal=%d), "
1722 "trap_expected=%d, current thread [%s] at %s\n",
1723 step, sig, tp->control.trap_expected,
1724 target_pid_to_str (inferior_ptid),
1725 paddress (gdbarch, pc));
1726
1727 /* Normally, by the time we reach `resume', the breakpoints are either
1728 removed or inserted, as appropriate. The exception is if we're sitting
1729 at a permanent breakpoint; we need to step over it, but permanent
1730 breakpoints can't be removed. So we have to test for it here. */
1731 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1732 {
1733 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1734 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1735 else
1736 error (_("\
1737 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1738 how to step past a permanent breakpoint on this architecture. Try using\n\
1739 a command like `return' or `jump' to continue execution."));
1740 }
1741
1742 /* If enabled, step over breakpoints by executing a copy of the
1743 instruction at a different address.
1744
1745 We can't use displaced stepping when we have a signal to deliver;
1746 the comments for displaced_step_prepare explain why. The
1747 comments in the handle_inferior event for dealing with 'random
1748 signals' explain what we do instead.
1749
1750 We can't use displaced stepping when we are waiting for a vfork_done
1751 event; displaced stepping would break the vfork child in the same way
1752 that a software single-step breakpoint would. */
1753 if (use_displaced_stepping (gdbarch)
1754 && (tp->control.trap_expected
1755 || (step && gdbarch_software_single_step_p (gdbarch)))
1756 && sig == TARGET_SIGNAL_0
1757 && !current_inferior ()->waiting_for_vfork_done)
1758 {
1759 struct displaced_step_inferior_state *displaced;
1760
1761 if (!displaced_step_prepare (inferior_ptid))
1762 {
1763 /* Got placed in displaced stepping queue. Will be resumed
1764 later when all the currently queued displaced stepping
1765 requests finish. The thread is not executing at this point,
1766 and the call to set_executing will be made later. But we
1767 need to call set_running here, since from the frontend's point of view,
1768 the thread is running. */
1769 set_running (inferior_ptid, 1);
1770 discard_cleanups (old_cleanups);
1771 return;
1772 }
1773
1774 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1775 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1776 displaced->step_closure);
1777 }
1778
1779 /* Do we need to do it the hard way, w/temp breakpoints? */
1780 else if (step)
1781 step = maybe_software_singlestep (gdbarch, pc);
1782
1783 /* Currently, our software single-step implementation leads to different
1784 results than hardware single-stepping in one situation: when stepping
1785 into delivering a signal which has an associated signal handler,
1786 hardware single-step will stop at the first instruction of the handler,
1787 while software single-step will simply skip execution of the handler.
1788
1789 For now, this difference in behavior is accepted since there is no
1790 easy way to actually implement single-stepping into a signal handler
1791 without kernel support.
1792
1793 However, there is one scenario where this difference leads to follow-on
1794 problems: if we're stepping off a breakpoint by removing all breakpoints
1795 and then single-stepping. In this case, the software single-step
1796 behavior means that even if there is a *breakpoint* in the signal
1797 handler, GDB still would not stop.
1798
1799 Fortunately, we can at least fix this particular issue. We detect
1800 here the case where we are about to deliver a signal while software
1801 single-stepping with breakpoints removed. In this situation, we
1802 revert the decisions to remove all breakpoints and insert single-
1803 step breakpoints, and instead we install a step-resume breakpoint
1804 at the current address, deliver the signal without stepping, and
1805 once we arrive back at the step-resume breakpoint, actually step
1806 over the breakpoint we originally wanted to step over. */
1807 if (singlestep_breakpoints_inserted_p
1808 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1809 {
1810 /* If we have nested signals or a pending signal is delivered
1811 immediately after a handler returns, we might already have
1812 a step-resume breakpoint set on the earlier handler. We cannot
1813 set another step-resume breakpoint; just continue on until the
1814 original breakpoint is hit. */
1815 if (tp->control.step_resume_breakpoint == NULL)
1816 {
1817 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1818 tp->step_after_step_resume_breakpoint = 1;
1819 }
1820
1821 remove_single_step_breakpoints ();
1822 singlestep_breakpoints_inserted_p = 0;
1823
1824 insert_breakpoints ();
1825 tp->control.trap_expected = 0;
1826 }
1827
1828 if (should_resume)
1829 {
1830 ptid_t resume_ptid;
1831
1832 /* If STEP is set, it's a request to use hardware stepping
1833 facilities. But in that case, we should never
1834 use singlestep breakpoints. */
1835 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1836
1837 /* Decide the set of threads to ask the target to resume. Start
1838 by assuming everything will be resumed, then narrow the set
1839 by applying increasingly restrictive conditions. */
1840 resume_ptid = user_visible_resume_ptid (step);
1841
1842 /* Maybe resume a single thread after all. */
1843 if (singlestep_breakpoints_inserted_p
1844 && stepping_past_singlestep_breakpoint)
1845 {
1846 /* The situation here is as follows. In thread T1 we wanted to
1847 single-step. Lacking hardware single-stepping we've
1848 set breakpoint at the PC of the next instruction -- call it
1849 P. After resuming, we've hit that breakpoint in thread T2.
1850 Now we've removed the original breakpoint, inserted a breakpoint
1851 at P+1, and are trying to step T2 past that breakpoint.
1852 We need to step only T2: if T1 is allowed to run freely,
1853 it can run past P, and if other threads are allowed to run,
1854 they can hit the breakpoint at P+1, and nested hits of single-step
1855 breakpoints are not something we'd want -- that's complicated
1856 to support, and has no value. */
1857 resume_ptid = inferior_ptid;
1858 }
1859 else if ((step || singlestep_breakpoints_inserted_p)
1860 && tp->control.trap_expected)
1861 {
1862 /* We're allowing a thread to run past a breakpoint it has
1863 hit, by single-stepping the thread with the breakpoint
1864 removed. In which case, we need to single-step only this
1865 thread, and keep others stopped, as they can miss this
1866 breakpoint if allowed to run.
1867
1868 The current code actually removes all breakpoints when
1869 doing this, not just the one being stepped over, so if we
1870 let other threads run, we can actually miss any
1871 breakpoint, not just the one at PC. */
1872 resume_ptid = inferior_ptid;
1873 }
1874
1875 if (gdbarch_cannot_step_breakpoint (gdbarch))
1876 {
1877 /* Most targets can step a breakpoint instruction, thus
1878 executing it normally. But if this one cannot, just
1879 continue and we will hit it anyway. */
1880 if (step && breakpoint_inserted_here_p (aspace, pc))
1881 step = 0;
1882 }
1883
1884 if (debug_displaced
1885 && use_displaced_stepping (gdbarch)
1886 && tp->control.trap_expected)
1887 {
1888 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1889 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1890 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1891 gdb_byte buf[4];
1892
1893 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1894 paddress (resume_gdbarch, actual_pc));
1895 read_memory (actual_pc, buf, sizeof (buf));
1896 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1897 }
1898
1899 /* Install inferior's terminal modes. */
1900 target_terminal_inferior ();
1901
1902 /* Avoid confusing the next resume, if the next stop/resume
1903 happens to apply to another thread. */
1904 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1905
1906 /* Advise target which signals may be handled silently. If we have
1907 removed breakpoints because we are stepping over one (which can
1908 happen only if we are not using displaced stepping), we need to
1909 receive all signals to avoid accidentally skipping a breakpoint
1910 during execution of a signal handler. */
1911 if ((step || singlestep_breakpoints_inserted_p)
1912 && tp->control.trap_expected
1913 && !use_displaced_stepping (gdbarch))
1914 target_pass_signals (0, NULL);
1915 else
1916 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1917
1918 target_resume (resume_ptid, step, sig);
1919 }
1920
1921 discard_cleanups (old_cleanups);
1922 }
1923 \f
1924 /* Proceeding. */
1925
1926 /* Clear out all variables saying what to do when the inferior is continued.
1927 First do this, then set the ones you want, then call `proceed'. */
1928
1929 static void
1930 clear_proceed_status_thread (struct thread_info *tp)
1931 {
1932 if (debug_infrun)
1933 fprintf_unfiltered (gdb_stdlog,
1934 "infrun: clear_proceed_status_thread (%s)\n",
1935 target_pid_to_str (tp->ptid));
1936
1937 tp->control.trap_expected = 0;
1938 tp->control.step_range_start = 0;
1939 tp->control.step_range_end = 0;
1940 tp->control.step_frame_id = null_frame_id;
1941 tp->control.step_stack_frame_id = null_frame_id;
1942 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1943 tp->stop_requested = 0;
1944
1945 tp->control.stop_step = 0;
1946
1947 tp->control.proceed_to_finish = 0;
1948
1949 /* Discard any remaining commands or status from previous stop. */
1950 bpstat_clear (&tp->control.stop_bpstat);
1951 }
1952
1953 static int
1954 clear_proceed_status_callback (struct thread_info *tp, void *data)
1955 {
1956 if (is_exited (tp->ptid))
1957 return 0;
1958
1959 clear_proceed_status_thread (tp);
1960 return 0;
1961 }
1962
1963 void
1964 clear_proceed_status (void)
1965 {
1966 if (!non_stop)
1967 {
1968 /* In all-stop mode, delete the per-thread status of all
1969 threads, even if inferior_ptid is null_ptid, there may be
1970 threads on the list. E.g., we may be launching a new
1971 process, while selecting the executable. */
1972 iterate_over_threads (clear_proceed_status_callback, NULL);
1973 }
1974
1975 if (!ptid_equal (inferior_ptid, null_ptid))
1976 {
1977 struct inferior *inferior;
1978
1979 if (non_stop)
1980 {
1981 /* If in non-stop mode, only delete the per-thread status of
1982 the current thread. */
1983 clear_proceed_status_thread (inferior_thread ());
1984 }
1985
1986 inferior = current_inferior ();
1987 inferior->control.stop_soon = NO_STOP_QUIETLY;
1988 }
1989
1990 stop_after_trap = 0;
1991
1992 observer_notify_about_to_proceed ();
1993
1994 if (stop_registers)
1995 {
1996 regcache_xfree (stop_registers);
1997 stop_registers = NULL;
1998 }
1999 }
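/* Illustrative sketch, not part of the original file: the contract stated
   above is "clear, set what you need, then proceed".  A much simplified
   range-stepping caller could therefore look like this (range_start and
   range_end are assumed inputs, not names from this file):

       clear_proceed_status ();

       tp = inferior_thread ();
       tp->control.step_range_start = range_start;
       tp->control.step_range_end = range_end;

       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);

   Real callers such as the step/next commands set considerably more
   state; this only illustrates the required ordering.  */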
2000
2001 /* Check the current thread against the thread that reported the most recent
2002 event. If a step-over is required return TRUE and set the current thread
2003 to the old thread. Otherwise return FALSE.
2004
2005 This should be suitable for any targets that support threads. */
2006
2007 static int
2008 prepare_to_proceed (int step)
2009 {
2010 ptid_t wait_ptid;
2011 struct target_waitstatus wait_status;
2012 int schedlock_enabled;
2013
2014 /* With non-stop mode on, threads are always handled individually. */
2015 gdb_assert (! non_stop);
2016
2017 /* Get the last target status returned by target_wait(). */
2018 get_last_target_status (&wait_ptid, &wait_status);
2019
2020 /* Make sure we were stopped at a breakpoint. */
2021 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2022 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2023 && wait_status.value.sig != TARGET_SIGNAL_ILL
2024 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2025 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2026 {
2027 return 0;
2028 }
2029
2030 schedlock_enabled = (scheduler_mode == schedlock_on
2031 || (scheduler_mode == schedlock_step
2032 && step));
2033
2034 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2035 if (schedlock_enabled)
2036 return 0;
2037
2038 /* Don't switch over if we're about to resume some process
2039 other than WAIT_PTID's, and schedule-multiple is off. */
2040 if (!sched_multi
2041 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2042 return 0;
2043
2044 /* Switched over from WAIT_PTID. */
2045 if (!ptid_equal (wait_ptid, minus_one_ptid)
2046 && !ptid_equal (inferior_ptid, wait_ptid))
2047 {
2048 struct regcache *regcache = get_thread_regcache (wait_ptid);
2049
2050 if (breakpoint_here_p (get_regcache_aspace (regcache),
2051 regcache_read_pc (regcache)))
2052 {
2053 /* If stepping, remember current thread to switch back to. */
2054 if (step)
2055 deferred_step_ptid = inferior_ptid;
2056
2057 /* Switch back to the WAIT_PTID thread. */
2058 switch_to_thread (wait_ptid);
2059
2060 if (debug_infrun)
2061 fprintf_unfiltered (gdb_stdlog,
2062 "infrun: prepare_to_proceed (step=%d), "
2063 "switched to [%s]\n",
2064 step, target_pid_to_str (inferior_ptid));
2065
2066 /* We return 1 to indicate that there is a breakpoint here,
2067 so we need to step over it before continuing to avoid
2068 hitting it straight away. */
2069 return 1;
2070 }
2071 }
2072
2073 return 0;
2074 }
2075
2076 /* Basic routine for continuing the program in various fashions.
2077
2078 ADDR is the address to resume at, or -1 for resume where stopped.
2079 SIGGNAL is the signal to give it, or 0 for none,
2080 or -1 to act according to how it stopped.
2081 STEP is nonzero if we should trap after one instruction.
2082 -1 means return after that and print nothing.
2083 You should probably set various step_... variables
2084 before calling here, if you are stepping.
2085
2086 You should call clear_proceed_status before calling proceed. */
2087
2088 void
2089 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2090 {
2091 struct regcache *regcache;
2092 struct gdbarch *gdbarch;
2093 struct thread_info *tp;
2094 CORE_ADDR pc;
2095 struct address_space *aspace;
2096 int oneproc = 0;
2097
2098 /* If we're stopped at a fork/vfork, follow the branch set by the
2099 "set follow-fork-mode" command; otherwise, we'll just proceed
2100 resuming the current thread. */
2101 if (!follow_fork ())
2102 {
2103 /* The target for some reason decided not to resume. */
2104 normal_stop ();
2105 if (target_can_async_p ())
2106 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2107 return;
2108 }
2109
2110 /* We'll update this if & when we switch to a new thread. */
2111 previous_inferior_ptid = inferior_ptid;
2112
2113 regcache = get_current_regcache ();
2114 gdbarch = get_regcache_arch (regcache);
2115 aspace = get_regcache_aspace (regcache);
2116 pc = regcache_read_pc (regcache);
2117
2118 if (step > 0)
2119 step_start_function = find_pc_function (pc);
2120 if (step < 0)
2121 stop_after_trap = 1;
2122
2123 if (addr == (CORE_ADDR) -1)
2124 {
2125 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2126 && execution_direction != EXEC_REVERSE)
2127 /* There is a breakpoint at the address we will resume at,
2128 step one instruction before inserting breakpoints so that
2129 we do not stop right away (and report a second hit at this
2130 breakpoint).
2131
2132 Note, we don't do this in reverse, because we won't
2133 actually be executing the breakpoint insn anyway.
2134 We'll be (un-)executing the previous instruction. */
2135
2136 oneproc = 1;
2137 else if (gdbarch_single_step_through_delay_p (gdbarch)
2138 && gdbarch_single_step_through_delay (gdbarch,
2139 get_current_frame ()))
2140 /* We stepped onto an instruction that needs to be stepped
2141 again before re-inserting the breakpoint, do so. */
2142 oneproc = 1;
2143 }
2144 else
2145 {
2146 regcache_write_pc (regcache, addr);
2147 }
2148
2149 if (debug_infrun)
2150 fprintf_unfiltered (gdb_stdlog,
2151 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2152 paddress (gdbarch, addr), siggnal, step);
2153
2154 if (non_stop)
2155 /* In non-stop, each thread is handled individually. The context
2156 must already be set to the right thread here. */
2157 ;
2158 else
2159 {
2160 /* In a multi-threaded task we may select another thread and
2161 then continue or step.
2162
2163 But if the old thread was stopped at a breakpoint, it will
2164 immediately cause another breakpoint stop without any
2165 execution (i.e. it will report a breakpoint hit incorrectly).
2166 So we must step over it first.
2167
2168 prepare_to_proceed checks the current thread against the
2169 thread that reported the most recent event. If a step-over
2170 is required it returns TRUE and sets the current thread to
2171 the old thread. */
2172 if (prepare_to_proceed (step))
2173 oneproc = 1;
2174 }
2175
2176 /* prepare_to_proceed may change the current thread. */
2177 tp = inferior_thread ();
2178
2179 if (oneproc)
2180 {
2181 tp->control.trap_expected = 1;
2182 /* If displaced stepping is enabled, we can step over the
2183 breakpoint without hitting it, so leave all breakpoints
2184 inserted. Otherwise we need to disable all breakpoints, step
2185 one instruction, and then re-add them when that step is
2186 finished. */
2187 if (!use_displaced_stepping (gdbarch))
2188 remove_breakpoints ();
2189 }
2190
2191 /* We can insert breakpoints if we're not trying to step over one,
2192 or if we are stepping over one but we're using displaced stepping
2193 to do so. */
2194 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2195 insert_breakpoints ();
2196
2197 if (!non_stop)
2198 {
2199 /* Pass the last stop signal to the thread we're resuming,
2200 irrespective of whether the current thread is the thread that
2201 got the last event or not. This was historically GDB's
2202 behaviour before keeping a stop_signal per thread. */
2203
2204 struct thread_info *last_thread;
2205 ptid_t last_ptid;
2206 struct target_waitstatus last_status;
2207
2208 get_last_target_status (&last_ptid, &last_status);
2209 if (!ptid_equal (inferior_ptid, last_ptid)
2210 && !ptid_equal (last_ptid, null_ptid)
2211 && !ptid_equal (last_ptid, minus_one_ptid))
2212 {
2213 last_thread = find_thread_ptid (last_ptid);
2214 if (last_thread)
2215 {
2216 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2217 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2218 }
2219 }
2220 }
2221
2222 if (siggnal != TARGET_SIGNAL_DEFAULT)
2223 tp->suspend.stop_signal = siggnal;
2224 /* If this signal should not be seen by program,
2225 give it zero. Used for debugging signals. */
2226 else if (!signal_program[tp->suspend.stop_signal])
2227 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2228
2229 annotate_starting ();
2230
2231 /* Make sure that output from GDB appears before output from the
2232 inferior. */
2233 gdb_flush (gdb_stdout);
2234
2235 /* Refresh prev_pc value just prior to resuming. This used to be
2236 done in stop_stepping, however, setting prev_pc there did not handle
2237 scenarios such as inferior function calls or returning from
2238 a function via the return command. In those cases, the prev_pc
2239 value was not set properly for subsequent commands. The prev_pc value
2240 is used to initialize the starting line number in the ecs. With an
2241 invalid value, the gdb next command ends up stopping at the position
2242 represented by the next line table entry past our start position.
2243 On platforms that generate one line table entry per line, this
2244 is not a problem. However, on the ia64, the compiler generates
2245 extraneous line table entries that do not increase the line number.
2246 When we issue the gdb next command on the ia64 after an inferior call
2247 or a return command, we often end up a few instructions forward, still
2248 within the original line we started.
2249
2250 An attempt was made to refresh the prev_pc at the same time the
2251 execution_control_state is initialized (for instance, just before
2252 waiting for an inferior event). But this approach did not work
2253 because of platforms that use ptrace, where the pc register cannot
2254 be read unless the inferior is stopped. At that point, we are not
2255 guaranteed the inferior is stopped and so the regcache_read_pc() call
2256 can fail. Setting the prev_pc value here ensures the value is updated
2257 correctly when the inferior is stopped. */
2258 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2259
2260 /* Fill in with reasonable starting values. */
2261 init_thread_stepping_state (tp);
2262
2263 /* Reset to normal state. */
2264 init_infwait_state ();
2265
2266 /* Resume inferior. */
2267 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2268
2269 /* Wait for it to stop (if not standalone)
2270 and in any case decode why it stopped, and act accordingly. */
2271 /* Do this only if we are not using the event loop, or if the target
2272 does not support asynchronous execution. */
2273 if (!target_can_async_p ())
2274 {
2275 wait_for_inferior ();
2276 normal_stop ();
2277 }
2278 }
2279 \f
2280
2281 /* Start remote-debugging of a machine over a serial link. */
2282
2283 void
2284 start_remote (int from_tty)
2285 {
2286 struct inferior *inferior;
2287
2288 inferior = current_inferior ();
2289 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2290
2291 /* Always go on waiting for the target, regardless of the mode. */
2292 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2293 indicate to wait_for_inferior that a target should timeout if
2294 nothing is returned (instead of just blocking). Because of this,
2295 targets expecting an immediate response need to, internally, set
2296 things up so that the target_wait() is forced to eventually
2297 timeout. */
2298 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2299 differentiate to its caller what the state of the target is after
2300 the initial open has been performed. Here we're assuming that
2301 the target has stopped. It should be possible to eventually have
2302 target_open() return to the caller an indication that the target
2303 is currently running and GDB state should be set to the same as
2304 for an async run. */
2305 wait_for_inferior ();
2306
2307 /* Now that the inferior has stopped, do any bookkeeping like
2308 loading shared libraries. We want to do this before normal_stop,
2309 so that the displayed frame is up to date. */
2310 post_create_inferior (&current_target, from_tty);
2311
2312 normal_stop ();
2313 }
2314
2315 /* Initialize static vars when a new inferior begins. */
2316
2317 void
2318 init_wait_for_inferior (void)
2319 {
2320 /* These are meaningless until the first time through wait_for_inferior. */
2321
2322 breakpoint_init_inferior (inf_starting);
2323
2324 clear_proceed_status ();
2325
2326 stepping_past_singlestep_breakpoint = 0;
2327 deferred_step_ptid = null_ptid;
2328
2329 target_last_wait_ptid = minus_one_ptid;
2330
2331 previous_inferior_ptid = inferior_ptid;
2332 init_infwait_state ();
2333
2334 /* Discard any skipped inlined frames. */
2335 clear_inline_frame_state (minus_one_ptid);
2336 }
2337
2338 \f
2339 /* This enum encodes possible reasons for doing a target_wait, so that
2340 wfi can call target_wait in one place. (Ultimately the call will be
2341 moved out of the infinite loop entirely.) */
2342
2343 enum infwait_states
2344 {
2345 infwait_normal_state,
2346 infwait_thread_hop_state,
2347 infwait_step_watch_state,
2348 infwait_nonstep_watch_state
2349 };
2350
2351 /* The PTID we'll do a target_wait on. */
2352 ptid_t waiton_ptid;
2353
2354 /* Current inferior wait state. */
2355 enum infwait_states infwait_state;
2356
2357 /* Data to be passed around while handling an event. This data is
2358 discarded between events. */
2359 struct execution_control_state
2360 {
2361 ptid_t ptid;
2362 /* The thread that got the event, if this was a thread event; NULL
2363 otherwise. */
2364 struct thread_info *event_thread;
2365
2366 struct target_waitstatus ws;
2367 int random_signal;
2368 int stop_func_filled_in;
2369 CORE_ADDR stop_func_start;
2370 CORE_ADDR stop_func_end;
2371 char *stop_func_name;
2372 int new_thread_event;
2373 int wait_some_more;
2374 };
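/* Illustrative sketch, not part of the original file: an
   execution_control_state lives on the stack of whoever drives the wait
   loop.  Stripped of cleanups and debugging, the synchronous driver
   below (wait_for_inferior) boils down to:

       struct execution_control_state ecss;

       memset (&ecss, 0, sizeof (ecss));
       do
         {
           ecss.ptid = target_wait (waiton_ptid, &ecss.ws, 0);
           handle_inferior_event (&ecss);
         }
       while (ecss.wait_some_more);

   handle_inferior_event sets wait_some_more when it consumed the event
   internally and the caller should simply wait again.  */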
2375
2376 static void handle_inferior_event (struct execution_control_state *ecs);
2377
2378 static void handle_step_into_function (struct gdbarch *gdbarch,
2379 struct execution_control_state *ecs);
2380 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2381 struct execution_control_state *ecs);
2382 static void check_exception_resume (struct execution_control_state *,
2383 struct frame_info *, struct symbol *);
2384
2385 static void stop_stepping (struct execution_control_state *ecs);
2386 static void prepare_to_wait (struct execution_control_state *ecs);
2387 static void keep_going (struct execution_control_state *ecs);
2388
2389 /* Callback for iterate_over_threads. If the thread is stopped, but
2390 the user/frontend doesn't know about that yet, go through
2391 normal_stop, as if the thread had just stopped now. ARG points at
2392 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2393 ptid_is_pid(PTID) is true, applies to all threads of the process
2394 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2395 PTID. */
2396
2397 static int
2398 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2399 {
2400 ptid_t ptid = * (ptid_t *) arg;
2401
2402 if ((ptid_equal (info->ptid, ptid)
2403 || ptid_equal (minus_one_ptid, ptid)
2404 || (ptid_is_pid (ptid)
2405 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2406 && is_running (info->ptid)
2407 && !is_executing (info->ptid))
2408 {
2409 struct cleanup *old_chain;
2410 struct execution_control_state ecss;
2411 struct execution_control_state *ecs = &ecss;
2412
2413 memset (ecs, 0, sizeof (*ecs));
2414
2415 old_chain = make_cleanup_restore_current_thread ();
2416
2417 switch_to_thread (info->ptid);
2418
2419 /* Go through handle_inferior_event/normal_stop, so we always
2420 have consistent output as if the stop event had been
2421 reported. */
2422 ecs->ptid = info->ptid;
2423 ecs->event_thread = find_thread_ptid (info->ptid);
2424 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2425 ecs->ws.value.sig = TARGET_SIGNAL_0;
2426
2427 handle_inferior_event (ecs);
2428
2429 if (!ecs->wait_some_more)
2430 {
2431 struct thread_info *tp;
2432
2433 normal_stop ();
2434
2435 /* Finish off the continuations. */
2436 tp = inferior_thread ();
2437 do_all_intermediate_continuations_thread (tp, 1);
2438 do_all_continuations_thread (tp, 1);
2439 }
2440
2441 do_cleanups (old_chain);
2442 }
2443
2444 return 0;
2445 }
2446
2447 /* This function is attached as a "thread_stop_requested" observer.
2448 Cleanup local state that assumed the PTID was to be resumed, and
2449 report the stop to the frontend. */
2450
2451 static void
2452 infrun_thread_stop_requested (ptid_t ptid)
2453 {
2454 struct displaced_step_inferior_state *displaced;
2455
2456 /* PTID was requested to stop. Remove it from the displaced
2457 stepping queue, so we don't try to resume it automatically. */
2458
2459 for (displaced = displaced_step_inferior_states;
2460 displaced;
2461 displaced = displaced->next)
2462 {
2463 struct displaced_step_request *it, **prev_next_p;
2464
2465 it = displaced->step_request_queue;
2466 prev_next_p = &displaced->step_request_queue;
2467 while (it)
2468 {
2469 if (ptid_match (it->ptid, ptid))
2470 {
2471 *prev_next_p = it->next;
2472 it->next = NULL;
2473 xfree (it);
2474 }
2475 else
2476 {
2477 prev_next_p = &it->next;
2478 }
2479
2480 it = *prev_next_p;
2481 }
2482 }
2483
2484 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2485 }
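/* Illustrative sketch, not part of the original file: the queue walk above
   uses the classic "pointer to the previous next-pointer" idiom, which
   unlinks matching entries from a singly linked list without special
   casing the head.  Reduced to its bare shape (hypothetical node type,
   plain C, free() from <stdlib.h>):

       struct node { struct node *next; int key; };

       static void
       remove_matching (struct node **prev_next_p, int key)
       {
         struct node *it = *prev_next_p;

         while (it)
           {
             if (it->key == key)
               {
                 *prev_next_p = it->next;   /* splice IT out */
                 free (it);
               }
             else
               prev_next_p = &it->next;     /* advance the anchor */

             it = *prev_next_p;
           }
       }

   which is exactly the shape of the step_request_queue loop above, with
   ptid_match as the predicate and xfree releasing the entry.  */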
2486
2487 static void
2488 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2489 {
2490 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2491 nullify_last_target_wait_ptid ();
2492 }
2493
2494 /* Callback for iterate_over_threads. */
2495
2496 static int
2497 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2498 {
2499 if (is_exited (info->ptid))
2500 return 0;
2501
2502 delete_step_resume_breakpoint (info);
2503 delete_exception_resume_breakpoint (info);
2504 return 0;
2505 }
2506
2507 /* In all-stop, delete the step resume breakpoint of any thread that
2508 had one. In non-stop, delete the step resume breakpoint of the
2509 thread that just stopped. */
2510
2511 static void
2512 delete_step_thread_step_resume_breakpoint (void)
2513 {
2514 if (!target_has_execution
2515 || ptid_equal (inferior_ptid, null_ptid))
2516 /* If the inferior has exited, we have already deleted the step
2517 resume breakpoints out of GDB's lists. */
2518 return;
2519
2520 if (non_stop)
2521 {
2522 /* If in non-stop mode, only delete the step-resume or
2523 longjmp-resume breakpoint of the thread that just stopped
2524 stepping. */
2525 struct thread_info *tp = inferior_thread ();
2526
2527 delete_step_resume_breakpoint (tp);
2528 delete_exception_resume_breakpoint (tp);
2529 }
2530 else
2531 /* In all-stop mode, delete all step-resume and longjmp-resume
2532 breakpoints of any thread that had them. */
2533 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2534 }
2535
2536 /* A cleanup wrapper. */
2537
2538 static void
2539 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2540 {
2541 delete_step_thread_step_resume_breakpoint ();
2542 }
2543
2544 /* Pretty print the results of target_wait, for debugging purposes. */
2545
2546 static void
2547 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2548 const struct target_waitstatus *ws)
2549 {
2550 char *status_string = target_waitstatus_to_string (ws);
2551 struct ui_file *tmp_stream = mem_fileopen ();
2552 char *text;
2553
2554 /* The text is split over several lines because it was getting too long.
2555 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2556 output as a unit; we want only one timestamp printed if debug_timestamp
2557 is set. */
2558
2559 fprintf_unfiltered (tmp_stream,
2560 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2561 if (PIDGET (waiton_ptid) != -1)
2562 fprintf_unfiltered (tmp_stream,
2563 " [%s]", target_pid_to_str (waiton_ptid));
2564 fprintf_unfiltered (tmp_stream, ", status) =\n");
2565 fprintf_unfiltered (tmp_stream,
2566 "infrun: %d [%s],\n",
2567 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2568 fprintf_unfiltered (tmp_stream,
2569 "infrun: %s\n",
2570 status_string);
2571
2572 text = ui_file_xstrdup (tmp_stream, NULL);
2573
2574 /* This uses %s in part to handle %'s in the text, but also to avoid
2575 a gcc error: the format attribute requires a string literal. */
2576 fprintf_unfiltered (gdb_stdlog, "%s", text);
2577
2578 xfree (status_string);
2579 xfree (text);
2580 ui_file_delete (tmp_stream);
2581 }
2582
2583 /* Prepare and stabilize the inferior for detaching it. E.g.,
2584 detaching while a thread is displaced stepping is a recipe for
2585 crashing it, as nothing would readjust the PC out of the scratch
2586 pad. */
2587
2588 void
2589 prepare_for_detach (void)
2590 {
2591 struct inferior *inf = current_inferior ();
2592 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2593 struct cleanup *old_chain_1;
2594 struct displaced_step_inferior_state *displaced;
2595
2596 displaced = get_displaced_stepping_state (inf->pid);
2597
2598 /* Is any thread of this process displaced stepping? If not,
2599 there's nothing else to do. */
2600 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2601 return;
2602
2603 if (debug_infrun)
2604 fprintf_unfiltered (gdb_stdlog,
2605 "displaced-stepping in-process while detaching");
2606
2607 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2608 inf->detaching = 1;
2609
2610 while (!ptid_equal (displaced->step_ptid, null_ptid))
2611 {
2612 struct cleanup *old_chain_2;
2613 struct execution_control_state ecss;
2614 struct execution_control_state *ecs;
2615
2616 ecs = &ecss;
2617 memset (ecs, 0, sizeof (*ecs));
2618
2619 overlay_cache_invalid = 1;
2620
2621 if (deprecated_target_wait_hook)
2622 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2623 else
2624 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2625
2626 if (debug_infrun)
2627 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2628
2629 /* If an error happens while handling the event, propagate GDB's
2630 knowledge of the executing state to the frontend/user running
2631 state. */
2632 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2633 &minus_one_ptid);
2634
2635 /* In non-stop mode, each thread is handled individually.
2636 Switch early, so the global state is set correctly for this
2637 thread. */
2638 if (non_stop
2639 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2640 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2641 context_switch (ecs->ptid);
2642
2643 /* Now figure out what to do with the result of the wait. */
2644 handle_inferior_event (ecs);
2645
2646 /* No error, don't finish the state yet. */
2647 discard_cleanups (old_chain_2);
2648
2649 /* Breakpoints and watchpoints are not installed on the target
2650 at this point, and signals are passed directly to the
2651 inferior, so this must mean the process is gone. */
2652 if (!ecs->wait_some_more)
2653 {
2654 discard_cleanups (old_chain_1);
2655 error (_("Program exited while detaching"));
2656 }
2657 }
2658
2659 discard_cleanups (old_chain_1);
2660 }
2661
2662 /* Wait for control to return from inferior to debugger.
2663
2664 If inferior gets a signal, we may decide to start it up again
2665 instead of returning. That is why there is a loop in this function.
2666 When this function actually returns it means the inferior
2667 should be left stopped and GDB should read more commands. */
2668
2669 void
2670 wait_for_inferior (void)
2671 {
2672 struct cleanup *old_cleanups;
2673 struct execution_control_state ecss;
2674 struct execution_control_state *ecs;
2675
2676 if (debug_infrun)
2677 fprintf_unfiltered
2678 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2679
2680 old_cleanups =
2681 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2682
2683 ecs = &ecss;
2684 memset (ecs, 0, sizeof (*ecs));
2685
2686 while (1)
2687 {
2688 struct cleanup *old_chain;
2689
2690 overlay_cache_invalid = 1;
2691
2692 if (deprecated_target_wait_hook)
2693 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2694 else
2695 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2696
2697 if (debug_infrun)
2698 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2699
2700 /* If an error happens while handling the event, propagate GDB's
2701 knowledge of the executing state to the frontend/user running
2702 state. */
2703 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2704
2705 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2706 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2707 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2708
2709 /* Now figure out what to do with the result of the wait. */
2710 handle_inferior_event (ecs);
2711
2712 /* No error, don't finish the state yet. */
2713 discard_cleanups (old_chain);
2714
2715 if (!ecs->wait_some_more)
2716 break;
2717 }
2718
2719 do_cleanups (old_cleanups);
2720 }
2721
2722 /* Asynchronous version of wait_for_inferior. It is called by the
2723 event loop whenever a change of state is detected on the file
2724 descriptor corresponding to the target. It can be called more than
2725 once to complete a single execution command. In such cases we need
2726 to keep the state in a global variable ECSS. If it is the last time
2727 that this function is called for a single execution command, then
2728 report to the user that the inferior has stopped, and do the
2729 necessary cleanups. */
2730
2731 void
2732 fetch_inferior_event (void *client_data)
2733 {
2734 struct execution_control_state ecss;
2735 struct execution_control_state *ecs = &ecss;
2736 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2737 struct cleanup *ts_old_chain;
2738 int was_sync = sync_execution;
2739 int cmd_done = 0;
2740
2741 memset (ecs, 0, sizeof (*ecs));
2742
2743 /* We're handling a live event, so make sure we're doing live
2744 debugging. If we're looking at traceframes while the target is
2745 running, we're going to need to get back to that mode after
2746 handling the event. */
2747 if (non_stop)
2748 {
2749 make_cleanup_restore_current_traceframe ();
2750 set_current_traceframe (-1);
2751 }
2752
2753 if (non_stop)
2754 /* In non-stop mode, the user/frontend should not notice a thread
2755 switch due to internal events. Make sure we reverse to the
2756 user selected thread and frame after handling the event and
2757 running any breakpoint commands. */
2758 make_cleanup_restore_current_thread ();
2759
2760 overlay_cache_invalid = 1;
2761
2762 make_cleanup_restore_integer (&execution_direction);
2763 execution_direction = target_execution_direction ();
2764
2765 if (deprecated_target_wait_hook)
2766 ecs->ptid =
2767 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2768 else
2769 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2770
2771 if (debug_infrun)
2772 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2773
2774 if (non_stop
2775 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2776 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2777 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2778 /* In non-stop mode, each thread is handled individually. Switch
2779 early, so the global state is set correctly for this
2780 thread. */
2781 context_switch (ecs->ptid);
2782
2783 /* If an error happens while handling the event, propagate GDB's
2784 knowledge of the executing state to the frontend/user running
2785 state. */
2786 if (!non_stop)
2787 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2788 else
2789 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2790
2791 /* Registered after make_cleanup_restore_current_thread above, so it runs
2792 first and still applies to the thread which has thrown the exception. */
2793 make_bpstat_clear_actions_cleanup ();
2794
2795 /* Now figure out what to do with the result of the wait. */
2796 handle_inferior_event (ecs);
2797
2798 if (!ecs->wait_some_more)
2799 {
2800 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2801
2802 delete_step_thread_step_resume_breakpoint ();
2803
2804 /* We may not find an inferior if this was a process exit. */
2805 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2806 normal_stop ();
2807
2808 if (target_has_execution
2809 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2810 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2811 && ecs->event_thread->step_multi
2812 && ecs->event_thread->control.stop_step)
2813 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2814 else
2815 {
2816 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2817 cmd_done = 1;
2818 }
2819 }
2820
2821 /* No error, don't finish the thread states yet. */
2822 discard_cleanups (ts_old_chain);
2823
2824 /* Revert thread and frame. */
2825 do_cleanups (old_chain);
2826
2827 /* If the inferior was in sync execution mode, and now isn't,
2828 restore the prompt (a synchronous execution command has finished,
2829 and we're ready for input). */
2830 if (interpreter_async && was_sync && !sync_execution)
2831 display_gdb_prompt (0);
2832
2833 if (cmd_done
2834 && !was_sync
2835 && exec_done_display_p
2836 && (ptid_equal (inferior_ptid, null_ptid)
2837 || !is_running (inferior_ptid)))
2838 printf_unfiltered (_("completed.\n"));
2839 }
2840
2841 /* Record the frame and location we're currently stepping through. */
2842 void
2843 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2844 {
2845 struct thread_info *tp = inferior_thread ();
2846
2847 tp->control.step_frame_id = get_frame_id (frame);
2848 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2849
2850 tp->current_symtab = sal.symtab;
2851 tp->current_line = sal.line;
2852 }
2853
2854 /* Clear context switchable stepping state. */
2855
2856 void
2857 init_thread_stepping_state (struct thread_info *tss)
2858 {
2859 tss->stepping_over_breakpoint = 0;
2860 tss->step_after_step_resume_breakpoint = 0;
2861 }
2862
2863 /* Return the cached copy of the last pid/waitstatus returned by
2864 target_wait()/deprecated_target_wait_hook(). The data is actually
2865 cached by handle_inferior_event(), which gets called immediately
2866 after target_wait()/deprecated_target_wait_hook(). */
2867
2868 void
2869 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2870 {
2871 *ptidp = target_last_wait_ptid;
2872 *status = target_last_waitstatus;
2873 }
2874
2875 void
2876 nullify_last_target_wait_ptid (void)
2877 {
2878 target_last_wait_ptid = minus_one_ptid;
2879 }
2880
2881 /* Switch thread contexts. */
2882
2883 static void
2884 context_switch (ptid_t ptid)
2885 {
2886 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2887 {
2888 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2889 target_pid_to_str (inferior_ptid));
2890 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2891 target_pid_to_str (ptid));
2892 }
2893
2894 switch_to_thread (ptid);
2895 }
2896
2897 static void
2898 adjust_pc_after_break (struct execution_control_state *ecs)
2899 {
2900 struct regcache *regcache;
2901 struct gdbarch *gdbarch;
2902 struct address_space *aspace;
2903 CORE_ADDR breakpoint_pc;
2904
2905 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2906 we aren't, just return.
2907
2908 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2909 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2910 implemented by software breakpoints should be handled through the normal
2911 breakpoint layer.
2912
2913 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2914 different signals (SIGILL or SIGEMT for instance), but it is less
2915 clear where the PC is pointing afterwards. It may not match
2916 gdbarch_decr_pc_after_break. I don't know any specific target that
2917 generates these signals at breakpoints (the code has been in GDB since at
2918 least 1992) so I can not guess how to handle them here.
2919
2920 In earlier versions of GDB, a target with
2921 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2922 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2923 target with both of these set in GDB history, and it seems unlikely to be
2924 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2925
2926 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2927 return;
2928
2929 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2930 return;
2931
2932 /* In reverse execution, when a breakpoint is hit, the instruction
2933 under it has already been de-executed. The reported PC always
2934 points at the breakpoint address, so adjusting it further would
2935 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2936 architecture:
2937
2938 B1 0x08000000 : INSN1
2939 B2 0x08000001 : INSN2
2940 0x08000002 : INSN3
2941 PC -> 0x08000003 : INSN4
2942
2943 Say you're stopped at 0x08000003 as above. Reverse continuing
2944 from that point should hit B2 as below. Reading the PC when the
2945 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2946 been de-executed already.
2947
2948 B1 0x08000000 : INSN1
2949 B2 PC -> 0x08000001 : INSN2
2950 0x08000002 : INSN3
2951 0x08000003 : INSN4
2952
2953 We can't apply the same logic as for forward execution, because
2954 we would wrongly adjust the PC to 0x08000000, since there's a
2955 breakpoint at PC - 1. We'd then report a hit on B1, although
2956 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2957 behaviour. */
2958 if (execution_direction == EXEC_REVERSE)
2959 return;
2960
2961 /* If this target does not decrement the PC after breakpoints, then
2962 we have nothing to do. */
2963 regcache = get_thread_regcache (ecs->ptid);
2964 gdbarch = get_regcache_arch (regcache);
2965 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2966 return;
2967
2968 aspace = get_regcache_aspace (regcache);
2969
2970 /* Find the location where (if we've hit a breakpoint) the
2971 breakpoint would be. */
2972 breakpoint_pc = regcache_read_pc (regcache)
2973 - gdbarch_decr_pc_after_break (gdbarch);
2974
2975 /* Check whether there actually is a software breakpoint inserted at
2976 that location.
2977
2978 If in non-stop mode, a race condition is possible where we've
2979 removed a breakpoint, but stop events for that breakpoint were
2980 already queued and arrive later. To suppress those spurious
2981 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2982 and retire them after a number of stop events are reported. */
2983 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2984 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2985 {
2986 struct cleanup *old_cleanups = NULL;
2987
2988 if (RECORD_IS_USED)
2989 old_cleanups = record_gdb_operation_disable_set ();
2990
2991 /* When using hardware single-step, a SIGTRAP is reported for both
2992 a completed single-step and a software breakpoint. Need to
2993 differentiate between the two, as the latter needs adjusting
2994 but the former does not.
2995
2996 The SIGTRAP can be due to a completed hardware single-step only if
2997 - we didn't insert software single-step breakpoints
2998 - the thread to be examined is still the current thread
2999 - this thread is currently being stepped
3000
3001 If any of these conditions does not hold, we must have stopped due
3002 to hitting a software breakpoint, and have to back up to the
3003 breakpoint address.
3004
3005 As a special case, we could have hardware single-stepped a
3006 software breakpoint. In this case (prev_pc == breakpoint_pc),
3007 we also need to back up to the breakpoint address. */
3008
3009 if (singlestep_breakpoints_inserted_p
3010 || !ptid_equal (ecs->ptid, inferior_ptid)
3011 || !currently_stepping (ecs->event_thread)
3012 || ecs->event_thread->prev_pc == breakpoint_pc)
3013 regcache_write_pc (regcache, breakpoint_pc);
3014
3015 if (RECORD_IS_USED)
3016 do_cleanups (old_cleanups);
3017 }
3018 }
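/* Illustrative sketch, not part of the original file: the adjustment above
   is plain address arithmetic.  On a hypothetical target whose breakpoint
   instruction is one byte long and which reports the PC *after* that
   instruction (decr_pc_after_break == 1):

       breakpoint planted at:   0x08048500
       PC reported by target:   0x08048501
       breakpoint_pc         =  0x08048501 - 1 = 0x08048500

   The PC is only rewound when a software breakpoint is actually inserted
   at breakpoint_pc (or a moribund one is still being retired in non-stop
   mode), so SIGTRAPs from genuine hardware single-steps are left
   untouched.  */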
3019
3020 void
3021 init_infwait_state (void)
3022 {
3023 waiton_ptid = pid_to_ptid (-1);
3024 infwait_state = infwait_normal_state;
3025 }
3026
3027 void
3028 error_is_running (void)
3029 {
3030 error (_("Cannot execute this command while "
3031 "the selected thread is running."));
3032 }
3033
3034 void
3035 ensure_not_running (void)
3036 {
3037 if (is_running (inferior_ptid))
3038 error_is_running ();
3039 }
3040
3041 static int
3042 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3043 {
3044 for (frame = get_prev_frame (frame);
3045 frame != NULL;
3046 frame = get_prev_frame (frame))
3047 {
3048 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3049 return 1;
3050 if (get_frame_type (frame) != INLINE_FRAME)
3051 break;
3052 }
3053
3054 return 0;
3055 }
3056
3057 /* Auxiliary function that handles syscall entry/return events.
3058 It returns 1 if the inferior should keep going (and GDB
3059 should ignore the event), or 0 if the event deserves to be
3060 processed. */
3061
3062 static int
3063 handle_syscall_event (struct execution_control_state *ecs)
3064 {
3065 struct regcache *regcache;
3066 struct gdbarch *gdbarch;
3067 int syscall_number;
3068
3069 if (!ptid_equal (ecs->ptid, inferior_ptid))
3070 context_switch (ecs->ptid);
3071
3072 regcache = get_thread_regcache (ecs->ptid);
3073 gdbarch = get_regcache_arch (regcache);
3074 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3075 stop_pc = regcache_read_pc (regcache);
3076
3077 target_last_waitstatus.value.syscall_number = syscall_number;
3078
3079 if (catch_syscall_enabled () > 0
3080 && catching_syscall_number (syscall_number) > 0)
3081 {
3082 if (debug_infrun)
3083 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3084 syscall_number);
3085
3086 ecs->event_thread->control.stop_bpstat
3087 = bpstat_stop_status (get_regcache_aspace (regcache),
3088 stop_pc, ecs->ptid);
3089 ecs->random_signal
3090 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3091
3092 if (!ecs->random_signal)
3093 {
3094 /* Catchpoint hit. */
3095 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3096 return 0;
3097 }
3098 }
3099
3100 /* If no catchpoint triggered for this, then keep going. */
3101 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3102 keep_going (ecs);
3103 return 1;
3104 }
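/* Editorial note -- illustrative, not part of the original file: the
   catch_syscall_enabled / catching_syscall_number checks above back GDB's
   syscall catchpoints, which a user would normally arm with something
   like (command spelling assumed from the catchpoint machinery used
   here):

       (gdb) catch syscall
       (gdb) catch syscall write

   When no armed catchpoint matches the reported syscall number, the
   event is swallowed and the inferior is simply kept going.  */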
3105
3106 /* Clear the supplied execution_control_state's stop_func_* fields. */
3107
3108 static void
3109 clear_stop_func (struct execution_control_state *ecs)
3110 {
3111 ecs->stop_func_filled_in = 0;
3112 ecs->stop_func_start = 0;
3113 ecs->stop_func_end = 0;
3114 ecs->stop_func_name = NULL;
3115 }
3116
3117 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3118
3119 static void
3120 fill_in_stop_func (struct gdbarch *gdbarch,
3121 struct execution_control_state *ecs)
3122 {
3123 if (!ecs->stop_func_filled_in)
3124 {
3125 /* Don't care about return value; stop_func_start and stop_func_name
3126 will both be 0 if it doesn't work. */
3127 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3128 &ecs->stop_func_start, &ecs->stop_func_end);
3129 ecs->stop_func_start
3130 += gdbarch_deprecated_function_start_offset (gdbarch);
3131
3132 ecs->stop_func_filled_in = 1;
3133 }
3134 }
3135
3136 /* Given an execution control state that has been freshly filled in
3137 by an event from the inferior, figure out what it means and take
3138 appropriate action. */
3139
3140 static void
3141 handle_inferior_event (struct execution_control_state *ecs)
3142 {
3143 struct frame_info *frame;
3144 struct gdbarch *gdbarch;
3145 int stopped_by_watchpoint;
3146 int stepped_after_stopped_by_watchpoint = 0;
3147 struct symtab_and_line stop_pc_sal;
3148 enum stop_kind stop_soon;
3149
3150 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3151 {
3152 /* We had an event in the inferior, but we are not interested in
3153 handling it at this level. The lower layers have already
3154 done what needs to be done, if anything.
3155
3156 One of the possible circumstances for this is when the
3157 inferior produces output for the console. The inferior has
3158 not stopped, and we are ignoring the event. Another possible
3159 circumstance is any event which the lower level knows will be
3160 reported multiple times without an intervening resume. */
3161 if (debug_infrun)
3162 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3163 prepare_to_wait (ecs);
3164 return;
3165 }
3166
3167 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3168 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3169 {
3170 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3171
3172 gdb_assert (inf);
3173 stop_soon = inf->control.stop_soon;
3174 }
3175 else
3176 stop_soon = NO_STOP_QUIETLY;
3177
3178 /* Cache the last pid/waitstatus. */
3179 target_last_wait_ptid = ecs->ptid;
3180 target_last_waitstatus = ecs->ws;
3181
3182 /* Always clear state belonging to the previous time we stopped. */
3183 stop_stack_dummy = STOP_NONE;
3184
3185 /* If it's a new process, add it to the thread database. */
3186
3187 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3188 && !ptid_equal (ecs->ptid, minus_one_ptid)
3189 && !in_thread_list (ecs->ptid));
3190
3191 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3192 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3193 add_thread (ecs->ptid);
3194
3195 ecs->event_thread = find_thread_ptid (ecs->ptid);
3196
3197 /* Dependent on valid ECS->EVENT_THREAD. */
3198 adjust_pc_after_break (ecs);
3199
3200 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3201 reinit_frame_cache ();
3202
3203 breakpoint_retire_moribund ();
3204
3205 /* First, distinguish signals caused by the debugger from signals
3206 that have to do with the program's own actions. Note that
3207 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3208 on the operating system version. Here we detect when a SIGILL or
3209 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3210 something similar for SIGSEGV, since a SIGSEGV will be generated
3211 when we're trying to execute a breakpoint instruction on a
3212 non-executable stack. This happens for call dummy breakpoints
3213 for architectures like SPARC that place call dummies on the
3214 stack. */
3215 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3216 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3217 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3218 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3219 {
3220 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3221
3222 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3223 regcache_read_pc (regcache)))
3224 {
3225 if (debug_infrun)
3226 fprintf_unfiltered (gdb_stdlog,
3227 "infrun: Treating signal as SIGTRAP\n");
3228 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3229 }
3230 }
3231
3232 /* Mark the non-executing threads accordingly. In all-stop, all
3233 threads of all processes are stopped when we get any event
3234 reported. In non-stop mode, only the event thread stops. If
3235 we're handling a process exit in non-stop mode, there's nothing
3236 to do, as threads of the dead process are gone, and threads of
3237 any other process were left running. */
3238 if (!non_stop)
3239 set_executing (minus_one_ptid, 0);
3240 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3241 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3242 set_executing (inferior_ptid, 0);
3243
3244 switch (infwait_state)
3245 {
3246 case infwait_thread_hop_state:
3247 if (debug_infrun)
3248 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3249 break;
3250
3251 case infwait_normal_state:
3252 if (debug_infrun)
3253 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3254 break;
3255
3256 case infwait_step_watch_state:
3257 if (debug_infrun)
3258 fprintf_unfiltered (gdb_stdlog,
3259 "infrun: infwait_step_watch_state\n");
3260
3261 stepped_after_stopped_by_watchpoint = 1;
3262 break;
3263
3264 case infwait_nonstep_watch_state:
3265 if (debug_infrun)
3266 fprintf_unfiltered (gdb_stdlog,
3267 "infrun: infwait_nonstep_watch_state\n");
3268 insert_breakpoints ();
3269
3270 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3271 handle things like signals arriving and other things happening
3272 in combination correctly? */
3273 stepped_after_stopped_by_watchpoint = 1;
3274 break;
3275
3276 default:
3277 internal_error (__FILE__, __LINE__, _("bad switch"));
3278 }
3279
3280 infwait_state = infwait_normal_state;
3281 waiton_ptid = pid_to_ptid (-1);
3282
3283 switch (ecs->ws.kind)
3284 {
3285 case TARGET_WAITKIND_LOADED:
3286 if (debug_infrun)
3287 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3288 /* Ignore gracefully during startup of the inferior, as it might
3289 be the shell which has just loaded some objects, otherwise
3290 add the symbols for the newly loaded objects. Also ignore at
3291 the beginning of an attach or remote session; we will query
3292 the full list of libraries once the connection is
3293 established. */
3294 if (stop_soon == NO_STOP_QUIETLY)
3295 {
3296 /* Check for any newly added shared libraries if we're
3297 supposed to be adding them automatically. Switch
3298 terminal for any messages produced by
3299 breakpoint_re_set. */
3300 target_terminal_ours_for_output ();
3301 /* NOTE: cagney/2003-11-25: Make certain that the target
3302 stack's section table is kept up-to-date. Architectures
3303 (e.g., PPC64) use the section table to perform
3304 operations such as address => section name and hence
3305 require the table to contain all sections (including
3306 those found in shared libraries). */
3307 #ifdef SOLIB_ADD
3308 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3309 #else
3310 solib_add (NULL, 0, &current_target, auto_solib_add);
3311 #endif
3312 target_terminal_inferior ();
3313
3314 /* If requested, stop when the dynamic linker notifies
3315 gdb of events. This allows the user to get control
3316 and place breakpoints in initializer routines for
3317 dynamically loaded objects (among other things). */
3318 if (stop_on_solib_events)
3319 {
3320 /* Make sure we print "Stopped due to solib-event" in
3321 normal_stop. */
3322 stop_print_frame = 1;
3323
3324 stop_stepping (ecs);
3325 return;
3326 }
3327
3328 /* NOTE drow/2007-05-11: This might be a good place to check
3329 for "catch load". */
3330 }
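 /* Illustrative usage sketch: the solib-event stop above is driven by
 the user-level setting

 (gdb) set stop-on-solib-events 1
 (gdb) run

 after which each shared-library load stops the inferior and normal_stop
 reports a shared-library-event stop, letting the user place breakpoints
 in initializers of objects loaded later via dlopen. */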
3331
3332 /* If we are skipping through a shell, or through shared library
3333 loading that we aren't interested in, resume the program. If
3334 we're running the program normally, also resume. But stop if
3335 we're attaching or setting up a remote connection. */
3336 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3337 {
3338 /* Loading of shared libraries might have changed breakpoint
3339 addresses. Make sure new breakpoints are inserted. */
3340 if (stop_soon == NO_STOP_QUIETLY
3341 && !breakpoints_always_inserted_mode ())
3342 insert_breakpoints ();
3343 resume (0, TARGET_SIGNAL_0);
3344 prepare_to_wait (ecs);
3345 return;
3346 }
3347
3348 break;
3349
3350 case TARGET_WAITKIND_SPURIOUS:
3351 if (debug_infrun)
3352 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3353 resume (0, TARGET_SIGNAL_0);
3354 prepare_to_wait (ecs);
3355 return;
3356
3357 case TARGET_WAITKIND_EXITED:
3358 if (debug_infrun)
3359 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3360 inferior_ptid = ecs->ptid;
3361 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3362 set_current_program_space (current_inferior ()->pspace);
3363 handle_vfork_child_exec_or_exit (0);
3364 target_terminal_ours (); /* Must do this before mourn anyway. */
3365 print_exited_reason (ecs->ws.value.integer);
3366
3367 /* Record the exit code in the convenience variable $_exitcode, so
3368 that the user can inspect this again later. */
3369 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3370 (LONGEST) ecs->ws.value.integer);
3371
3372 /* Also record this in the inferior itself. */
3373 current_inferior ()->has_exit_code = 1;
3374 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
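 /* Illustrative usage sketch (the value shown is an example only): the
 exit code recorded just above is visible to the user as a convenience
 variable:

 (gdb) run
 ...
 (gdb) print $_exitcode
 $1 = 2
 */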
3375
3376 gdb_flush (gdb_stdout);
3377 target_mourn_inferior ();
3378 singlestep_breakpoints_inserted_p = 0;
3379 cancel_single_step_breakpoints ();
3380 stop_print_frame = 0;
3381 stop_stepping (ecs);
3382 return;
3383
3384 case TARGET_WAITKIND_SIGNALLED:
3385 if (debug_infrun)
3386 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3387 inferior_ptid = ecs->ptid;
3388 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3389 set_current_program_space (current_inferior ()->pspace);
3390 handle_vfork_child_exec_or_exit (0);
3391 stop_print_frame = 0;
3392 target_terminal_ours (); /* Must do this before mourn anyway. */
3393
3394 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3395 reach here unless the inferior is dead. However, for years
3396 target_kill() was called here, which hints that fatal signals aren't
3397 really fatal on some systems. If that's true, then some changes
3398 may be needed. */
3399 target_mourn_inferior ();
3400
3401 print_signal_exited_reason (ecs->ws.value.sig);
3402 singlestep_breakpoints_inserted_p = 0;
3403 cancel_single_step_breakpoints ();
3404 stop_stepping (ecs);
3405 return;
3406
3407 /* The following are the only cases in which we keep going;
3408 the above cases end in a continue or goto. */
3409 case TARGET_WAITKIND_FORKED:
3410 case TARGET_WAITKIND_VFORKED:
3411 if (debug_infrun)
3412 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3413
3414 /* Check whether the inferior is displaced stepping. */
3415 {
3416 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3417 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3418 struct displaced_step_inferior_state *displaced
3419 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3420
3421 /* If displaced stepping is in use, check whether thread ecs->ptid
3422 is the thread that is displaced stepping. */
3423 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3424 {
3425 struct inferior *parent_inf
3426 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3427 struct regcache *child_regcache;
3428 CORE_ADDR parent_pc;
3429
3430 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3431 indicating that the displaced stepping of syscall instruction
3432 has been done. Perform cleanup for parent process here. Note
3433 that this operation also cleans up the child process for vfork,
3434 because their pages are shared. */
3435 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3436
3437 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3438 {
3439 /* Restore scratch pad for child process. */
3440 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3441 }
3442
3443 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3444 the child's PC is also within the scratchpad. Set the child's PC
3445 to the parent's PC value, which has already been fixed up.
3446 FIXME: we use the parent's aspace here, although we're touching
3447 the child, because the child hasn't been added to the inferior
3448 list yet at this point. */
3449
3450 child_regcache
3451 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3452 gdbarch,
3453 parent_inf->aspace);
3454 /* Read PC value of parent process. */
3455 parent_pc = regcache_read_pc (regcache);
3456
3457 if (debug_displaced)
3458 fprintf_unfiltered (gdb_stdlog,
3459 "displaced: write child pc from %s to %s\n",
3460 paddress (gdbarch,
3461 regcache_read_pc (child_regcache)),
3462 paddress (gdbarch, parent_pc));
3463
3464 regcache_write_pc (child_regcache, parent_pc);
3465 }
3466 }
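 /* Illustrative sketch: the fixup above only runs when the fork/vfork
 syscall instruction itself was being displaced-stepped, i.e. executed
 out of line in the scratch pad. The "displaced:" log written above can
 be enabled with

 (gdb) set debug displaced 1

 which shows the child's PC being rewritten from the scratch pad address
 back to the parent's fixed-up PC. */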
3467
3468 if (!ptid_equal (ecs->ptid, inferior_ptid))
3469 {
3470 context_switch (ecs->ptid);
3471 reinit_frame_cache ();
3472 }
3473
3474 /* Immediately detach breakpoints from the child before there's
3475 any chance of letting the user delete breakpoints from the
3476 breakpoint lists. If we don't do this early, it's easy to
3477 leave leftover traps in the child, viz.: "break foo; catch
3478 fork; c; <fork>; del; c; <child calls foo>". We only follow
3479 the fork on the last `continue', and by that time the
3480 breakpoint at "foo" is long gone from the breakpoint table.
3481 If we vforked, then we don't need to unpatch here, since both
3482 parent and child are sharing the same memory pages; we'll
3483 need to unpatch at follow/detach time instead to be certain
3484 that new breakpoints added between catchpoint hit time and
3485 vfork follow are detached. */
3486 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3487 {
3488 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3489
3490 /* This won't actually modify the breakpoint list, but will
3491 physically remove the breakpoints from the child. */
3492 detach_breakpoints (child_pid);
3493 }
3494
3495 if (singlestep_breakpoints_inserted_p)
3496 {
3497 /* Pull the single step breakpoints out of the target. */
3498 remove_single_step_breakpoints ();
3499 singlestep_breakpoints_inserted_p = 0;
3500 }
3501
3502 /* In case the event is caught by a catchpoint, remember that
3503 the event is to be followed at the next resume of the thread,
3504 and not immediately. */
3505 ecs->event_thread->pending_follow = ecs->ws;
3506
3507 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3508
3509 ecs->event_thread->control.stop_bpstat
3510 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3511 stop_pc, ecs->ptid);
3512
3513 /* Note that we're interested in knowing whether the bpstat actually
3514 causes a stop, not just whether it may explain the signal.
3515 Software watchpoints, for example, always appear in the
3516 bpstat. */
3517 ecs->random_signal
3518 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3519
3520 /* If no catchpoint triggered for this, then keep going. */
3521 if (ecs->random_signal)
3522 {
3523 ptid_t parent;
3524 ptid_t child;
3525 int should_resume;
3526 int follow_child
3527 = (follow_fork_mode_string == follow_fork_mode_child);
3528
3529 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3530
3531 should_resume = follow_fork ();
3532
3533 parent = ecs->ptid;
3534 child = ecs->ws.value.related_pid;
3535
3536 /* In non-stop mode, also resume the other branch. */
3537 if (non_stop && !detach_fork)
3538 {
3539 if (follow_child)
3540 switch_to_thread (parent);
3541 else
3542 switch_to_thread (child);
3543
3544 ecs->event_thread = inferior_thread ();
3545 ecs->ptid = inferior_ptid;
3546 keep_going (ecs);
3547 }
3548
3549 if (follow_child)
3550 switch_to_thread (child);
3551 else
3552 switch_to_thread (parent);
3553
3554 ecs->event_thread = inferior_thread ();
3555 ecs->ptid = inferior_ptid;
3556
3557 if (should_resume)
3558 keep_going (ecs);
3559 else
3560 stop_stepping (ecs);
3561 return;
3562 }
3563 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3564 goto process_event_stop_test;
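 /* Illustrative usage sketch: whether the fork stops the user here or
 is silently followed depends on catchpoints and the follow-fork
 settings consulted above, e.g.:

 (gdb) catch fork
 (gdb) set follow-fork-mode child
 (gdb) set detach-on-fork off

 With no fork catchpoint set, ecs->random_signal is true and GDB just
 follows the configured branch and keeps going (keeping the other
 inferior around when detach-on-fork is off). */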
3565
3566 case TARGET_WAITKIND_VFORK_DONE:
3567 /* Done with the shared memory region. Re-insert breakpoints in
3568 the parent, and keep going. */
3569
3570 if (debug_infrun)
3571 fprintf_unfiltered (gdb_stdlog,
3572 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3573
3574 if (!ptid_equal (ecs->ptid, inferior_ptid))
3575 context_switch (ecs->ptid);
3576
3577 current_inferior ()->waiting_for_vfork_done = 0;
3578 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3579 /* This also takes care of reinserting breakpoints in the
3580 previously locked inferior. */
3581 keep_going (ecs);
3582 return;
3583
3584 case TARGET_WAITKIND_EXECD:
3585 if (debug_infrun)
3586 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3587
3588 if (!ptid_equal (ecs->ptid, inferior_ptid))
3589 {
3590 context_switch (ecs->ptid);
3591 reinit_frame_cache ();
3592 }
3593
3594 singlestep_breakpoints_inserted_p = 0;
3595 cancel_single_step_breakpoints ();
3596
3597 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3598
3599 /* Do whatever is necessary to the parent branch of the vfork. */
3600 handle_vfork_child_exec_or_exit (1);
3601
3602 /* This causes the eventpoints and symbol table to be reset.
3603 Must do this now, before trying to determine whether to
3604 stop. */
3605 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3606
3607 ecs->event_thread->control.stop_bpstat
3608 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3609 stop_pc, ecs->ptid);
3610 ecs->random_signal
3611 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3612
3613 /* Note that this may be referenced from inside
3614 bpstat_stop_status above, through inferior_has_execd. */
3615 xfree (ecs->ws.value.execd_pathname);
3616 ecs->ws.value.execd_pathname = NULL;
3617
3618 /* If no catchpoint triggered for this, then keep going. */
3619 if (ecs->random_signal)
3620 {
3621 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3622 keep_going (ecs);
3623 return;
3624 }
3625 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3626 goto process_event_stop_test;
3627
3628 /* Be careful not to try to gather much state about a thread
3629 that's in a syscall. It's frequently a losing proposition. */
3630 case TARGET_WAITKIND_SYSCALL_ENTRY:
3631 if (debug_infrun)
3632 fprintf_unfiltered (gdb_stdlog,
3633 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3634 /* Get the current syscall number. */
3635 if (handle_syscall_event (ecs) != 0)
3636 return;
3637 goto process_event_stop_test;
3638
3639 /* Before examining the threads further, step this thread to
3640 get it entirely out of the syscall. (We get notice of the
3641 event when the thread is just on the verge of exiting a
3642 syscall. Stepping one instruction seems to get it back
3643 into user code.) */
3644 case TARGET_WAITKIND_SYSCALL_RETURN:
3645 if (debug_infrun)
3646 fprintf_unfiltered (gdb_stdlog,
3647 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3648 if (handle_syscall_event (ecs) != 0)
3649 return;
3650 goto process_event_stop_test;
3651
3652 case TARGET_WAITKIND_STOPPED:
3653 if (debug_infrun)
3654 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3655 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3656 break;
3657
3658 case TARGET_WAITKIND_NO_HISTORY:
3659 /* Reverse execution: target ran out of history info. */
3660 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3661 print_no_history_reason ();
3662 stop_stepping (ecs);
3663 return;
3664 }
3665
3666 if (ecs->new_thread_event)
3667 {
3668 if (non_stop)
3669 /* Non-stop assumes that the target handles adding new threads
3670 to the thread list. */
3671 internal_error (__FILE__, __LINE__,
3672 "targets should add new threads to the thread "
3673 "list themselves in non-stop mode.");
3674
3675 /* We may want to consider not doing a resume here in order to
3676 give the user a chance to play with the new thread. It might
3677 be good to make that a user-settable option. */
3678
3679 /* At this point, all threads are stopped (happens automatically
3680 in either the OS or the native code). Therefore we need to
3681 continue all threads in order to make progress. */
3682
3683 if (!ptid_equal (ecs->ptid, inferior_ptid))
3684 context_switch (ecs->ptid);
3685 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3686 prepare_to_wait (ecs);
3687 return;
3688 }
3689
3690 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3691 {
3692 /* Do we need to clean up the state of a thread that has
3693 completed a displaced single-step? (Doing so usually affects
3694 the PC, so do it here, before we set stop_pc.) */
3695 displaced_step_fixup (ecs->ptid,
3696 ecs->event_thread->suspend.stop_signal);
3697
3698 /* If we either finished a single-step or hit a breakpoint, but
3699 the user wanted this thread to be stopped, pretend we got a
3700 SIG0 (generic unsignaled stop). */
3701
3702 if (ecs->event_thread->stop_requested
3703 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3704 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3705 }
3706
3707 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3708
3709 if (debug_infrun)
3710 {
3711 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3712 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3713 struct cleanup *old_chain = save_inferior_ptid ();
3714
3715 inferior_ptid = ecs->ptid;
3716
3717 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3718 paddress (gdbarch, stop_pc));
3719 if (target_stopped_by_watchpoint ())
3720 {
3721 CORE_ADDR addr;
3722
3723 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3724
3725 if (target_stopped_data_address (&current_target, &addr))
3726 fprintf_unfiltered (gdb_stdlog,
3727 "infrun: stopped data address = %s\n",
3728 paddress (gdbarch, addr));
3729 else
3730 fprintf_unfiltered (gdb_stdlog,
3731 "infrun: (no data address available)\n");
3732 }
3733
3734 do_cleanups (old_chain);
3735 }
3736
3737 if (stepping_past_singlestep_breakpoint)
3738 {
3739 gdb_assert (singlestep_breakpoints_inserted_p);
3740 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3741 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3742
3743 stepping_past_singlestep_breakpoint = 0;
3744
3745 /* We've either finished single-stepping past the single-step
3746 breakpoint, or stopped for some other reason. It would be nice if
3747 we could tell, but we can't reliably. */
3748 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3749 {
3750 if (debug_infrun)
3751 fprintf_unfiltered (gdb_stdlog,
3752 "infrun: stepping_past_"
3753 "singlestep_breakpoint\n");
3754 /* Pull the single step breakpoints out of the target. */
3755 remove_single_step_breakpoints ();
3756 singlestep_breakpoints_inserted_p = 0;
3757
3758 ecs->random_signal = 0;
3759 ecs->event_thread->control.trap_expected = 0;
3760
3761 context_switch (saved_singlestep_ptid);
3762 if (deprecated_context_hook)
3763 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3764
3765 resume (1, TARGET_SIGNAL_0);
3766 prepare_to_wait (ecs);
3767 return;
3768 }
3769 }
3770
3771 if (!ptid_equal (deferred_step_ptid, null_ptid))
3772 {
3773 /* In non-stop mode, there's never a deferred_step_ptid set. */
3774 gdb_assert (!non_stop);
3775
3776 /* If we stopped for some other reason than single-stepping, ignore
3777 the fact that we were supposed to switch back. */
3778 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3779 {
3780 if (debug_infrun)
3781 fprintf_unfiltered (gdb_stdlog,
3782 "infrun: handling deferred step\n");
3783
3784 /* Pull the single step breakpoints out of the target. */
3785 if (singlestep_breakpoints_inserted_p)
3786 {
3787 remove_single_step_breakpoints ();
3788 singlestep_breakpoints_inserted_p = 0;
3789 }
3790
3791 ecs->event_thread->control.trap_expected = 0;
3792
3793 /* Note: We do not call context_switch at this point, as the
3794 context is already set up for stepping the original thread. */
3795 switch_to_thread (deferred_step_ptid);
3796 deferred_step_ptid = null_ptid;
3797 /* Suppress spurious "Switching to ..." message. */
3798 previous_inferior_ptid = inferior_ptid;
3799
3800 resume (1, TARGET_SIGNAL_0);
3801 prepare_to_wait (ecs);
3802 return;
3803 }
3804
3805 deferred_step_ptid = null_ptid;
3806 }
3807
3808 /* See if a thread hit a thread-specific breakpoint that was meant for
3809 another thread. If so, then step that thread past the breakpoint,
3810 and continue it. */
3811
3812 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3813 {
3814 int thread_hop_needed = 0;
3815 struct address_space *aspace =
3816 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3817
3818 /* Check if a regular breakpoint has been hit before checking
3819 for a potential single step breakpoint. Otherwise, GDB will
3820 not see this breakpoint hit when stepping onto breakpoints. */
3821 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3822 {
3823 ecs->random_signal = 0;
3824 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3825 thread_hop_needed = 1;
3826 }
3827 else if (singlestep_breakpoints_inserted_p)
3828 {
3829 /* We have not context switched yet, so this should be true
3830 no matter which thread hit the singlestep breakpoint. */
3831 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3832 if (debug_infrun)
3833 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3834 "trap for %s\n",
3835 target_pid_to_str (ecs->ptid));
3836
3837 ecs->random_signal = 0;
3838 /* The call to in_thread_list is necessary because PTIDs sometimes
3839 change when we go from single-threaded to multi-threaded. If
3840 the singlestep_ptid is still in the list, assume that it is
3841 really different from ecs->ptid. */
3842 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3843 && in_thread_list (singlestep_ptid))
3844 {
3845 /* If the PC of the thread we were trying to single-step
3846 has changed, discard this event (which we were going
3847 to ignore anyway), and pretend we saw that thread
3848 trap. This prevents us continuously moving the
3849 single-step breakpoint forward, one instruction at a
3850 time. If the PC has changed, then the thread we were
3851 trying to single-step has trapped or been signalled,
3852 but the event has not been reported to GDB yet.
3853
3854 There might be some cases where this loses signal
3855 information, if a signal has arrived at exactly the
3856 same time that the PC changed, but this is the best
3857 we can do with the information available. Perhaps we
3858 should arrange to report all events for all threads
3859 when they stop, or to re-poll the remote looking for
3860 this particular thread (i.e. temporarily enable
3861 schedlock). */
3862
3863 CORE_ADDR new_singlestep_pc
3864 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3865
3866 if (new_singlestep_pc != singlestep_pc)
3867 {
3868 enum target_signal stop_signal;
3869
3870 if (debug_infrun)
3871 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3872 " but expected thread advanced also\n");
3873
3874 /* The current context still belongs to
3875 singlestep_ptid. Don't swap here, since that's
3876 the context we want to use. Just fudge our
3877 state and continue. */
3878 stop_signal = ecs->event_thread->suspend.stop_signal;
3879 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3880 ecs->ptid = singlestep_ptid;
3881 ecs->event_thread = find_thread_ptid (ecs->ptid);
3882 ecs->event_thread->suspend.stop_signal = stop_signal;
3883 stop_pc = new_singlestep_pc;
3884 }
3885 else
3886 {
3887 if (debug_infrun)
3888 fprintf_unfiltered (gdb_stdlog,
3889 "infrun: unexpected thread\n");
3890
3891 thread_hop_needed = 1;
3892 stepping_past_singlestep_breakpoint = 1;
3893 saved_singlestep_ptid = singlestep_ptid;
3894 }
3895 }
3896 }
3897
3898 if (thread_hop_needed)
3899 {
3900 struct regcache *thread_regcache;
3901 int remove_status = 0;
3902
3903 if (debug_infrun)
3904 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3905
3906 /* Switch context before touching inferior memory, the
3907 previous thread may have exited. */
3908 if (!ptid_equal (inferior_ptid, ecs->ptid))
3909 context_switch (ecs->ptid);
3910
3911 /* Saw a breakpoint, but it was hit by the wrong thread.
3912 Just continue. */
3913
3914 if (singlestep_breakpoints_inserted_p)
3915 {
3916 /* Pull the single step breakpoints out of the target. */
3917 remove_single_step_breakpoints ();
3918 singlestep_breakpoints_inserted_p = 0;
3919 }
3920
3921 /* If the arch can displace step, don't remove the
3922 breakpoints. */
3923 thread_regcache = get_thread_regcache (ecs->ptid);
3924 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3925 remove_status = remove_breakpoints ();
3926
3927 /* Did we fail to remove breakpoints? If so, try
3928 to set the PC past the bp. (There's at least
3929 one situation in which we can fail to remove
3930 the bp's: On HP-UX's that use ttrace, we can't
3931 change the address space of a vforking child
3932 process until the child exits (well, okay, not
3933 then either :-) or execs.) */
3934 if (remove_status != 0)
3935 error (_("Cannot step over breakpoint hit in wrong thread"));
3936 else
3937 { /* Single step */
3938 if (!non_stop)
3939 {
3940 /* Only need to require the next event from this
3941 thread in all-stop mode. */
3942 waiton_ptid = ecs->ptid;
3943 infwait_state = infwait_thread_hop_state;
3944 }
3945
3946 ecs->event_thread->stepping_over_breakpoint = 1;
3947 keep_going (ecs);
3948 return;
3949 }
3950 }
3951 else if (singlestep_breakpoints_inserted_p)
3952 {
3953 ecs->random_signal = 0;
3954 }
3955 }
3956 else
3957 ecs->random_signal = 1;
3958
3959 /* See if something interesting happened to the non-current thread. If
3960 so, then switch to that thread. */
3961 if (!ptid_equal (ecs->ptid, inferior_ptid))
3962 {
3963 if (debug_infrun)
3964 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3965
3966 context_switch (ecs->ptid);
3967
3968 if (deprecated_context_hook)
3969 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3970 }
3971
3972 /* At this point, get hold of the now-current thread's frame. */
3973 frame = get_current_frame ();
3974 gdbarch = get_frame_arch (frame);
3975
3976 if (singlestep_breakpoints_inserted_p)
3977 {
3978 /* Pull the single step breakpoints out of the target. */
3979 remove_single_step_breakpoints ();
3980 singlestep_breakpoints_inserted_p = 0;
3981 }
3982
3983 if (stepped_after_stopped_by_watchpoint)
3984 stopped_by_watchpoint = 0;
3985 else
3986 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3987
3988 /* If necessary, step over this watchpoint. We'll be back to display
3989 it in a moment. */
3990 if (stopped_by_watchpoint
3991 && (target_have_steppable_watchpoint
3992 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3993 {
3994 /* At this point, we are stopped at an instruction which has
3995 attempted to write to a piece of memory under control of
3996 a watchpoint. The instruction hasn't actually executed
3997 yet. If we were to evaluate the watchpoint expression
3998 now, we would get the old value, and therefore no change
3999 would seem to have occurred.
4000
4001 In order to make watchpoints work `right', we really need
4002 to complete the memory write, and then evaluate the
4003 watchpoint expression. We do this by single-stepping the
4004 target.
4005
4006 It may not be necessary to disable the watchpoint to step over
4007 it. For example, the PA can (with some kernel cooperation)
4008 single step over a watchpoint without disabling the watchpoint.
4009
4010 It is far more common to need to disable a watchpoint to step
4011 the inferior over it. If we have non-steppable watchpoints,
4012 we must disable the current watchpoint; it's simplest to
4013 disable all watchpoints and breakpoints. */
4014 int hw_step = 1;
4015
4016 if (!target_have_steppable_watchpoint)
4017 {
4018 remove_breakpoints ();
4019 /* See comment in resume why we need to stop bypassing signals
4020 while breakpoints have been removed. */
4021 target_pass_signals (0, NULL);
4022 }
4023 /* Single step */
4024 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4025 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4026 waiton_ptid = ecs->ptid;
4027 if (target_have_steppable_watchpoint)
4028 infwait_state = infwait_step_watch_state;
4029 else
4030 infwait_state = infwait_nonstep_watch_state;
4031 prepare_to_wait (ecs);
4032 return;
4033 }
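 /* Illustrative sketch (assuming a target without steppable
 watchpoints), with a watched global named g:

 (gdb) watch g

 A store to g traps before the write has landed, so the code above
 removes breakpoints, single-steps the store, and only then lets the
 watchpoint machinery compare the old and new values of g. */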
4034
4035 clear_stop_func (ecs);
4036 ecs->event_thread->stepping_over_breakpoint = 0;
4037 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4038 ecs->event_thread->control.stop_step = 0;
4039 stop_print_frame = 1;
4040 ecs->random_signal = 0;
4041 stopped_by_random_signal = 0;
4042
4043 /* Hide inlined functions starting here, unless we just performed stepi or
4044 nexti. After stepi and nexti, always show the innermost frame (not any
4045 inline function call sites). */
4046 if (ecs->event_thread->control.step_range_end != 1)
4047 skip_inline_frames (ecs->ptid);
4048
4049 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4050 && ecs->event_thread->control.trap_expected
4051 && gdbarch_single_step_through_delay_p (gdbarch)
4052 && currently_stepping (ecs->event_thread))
4053 {
4054 /* We're trying to step off a breakpoint. Turns out that we're
4055 also on an instruction that needs to be stepped multiple
4056 times before it's been fully executed. E.g., architectures
4057 with a delay slot. It needs to be stepped twice, once for
4058 the instruction and once for the delay slot. */
4059 int step_through_delay
4060 = gdbarch_single_step_through_delay (gdbarch, frame);
4061
4062 if (debug_infrun && step_through_delay)
4063 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4064 if (ecs->event_thread->control.step_range_end == 0
4065 && step_through_delay)
4066 {
4067 /* The user issued a continue when stopped at a breakpoint.
4068 Set up for another trap and get out of here. */
4069 ecs->event_thread->stepping_over_breakpoint = 1;
4070 keep_going (ecs);
4071 return;
4072 }
4073 else if (step_through_delay)
4074 {
4075 /* The user issued a step when stopped at a breakpoint.
4076 Maybe we should stop, maybe we should not - the delay
4077 slot *might* correspond to a line of source. In any
4078 case, don't decide that here, just set
4079 ecs->stepping_over_breakpoint, making sure we
4080 single-step again before breakpoints are re-inserted. */
4081 ecs->event_thread->stepping_over_breakpoint = 1;
4082 }
4083 }
4084
4085 /* Look at the cause of the stop, and decide what to do.
4086 The alternatives are:
4087 1) stop_stepping and return; to really stop and return to the debugger,
4088 2) keep_going and return to start up again
4089 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4090 3) set ecs->random_signal to 1, and the decision between 1 and 2
4091 will be made according to the signal handling tables. */
4092
4093 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4094 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4095 || stop_soon == STOP_QUIETLY_REMOTE)
4096 {
4097 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4098 && stop_after_trap)
4099 {
4100 if (debug_infrun)
4101 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4102 stop_print_frame = 0;
4103 stop_stepping (ecs);
4104 return;
4105 }
4106
4107 /* This originates from start_remote(), start_inferior() and
4108 shared library hook functions. */
4109 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4110 {
4111 if (debug_infrun)
4112 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4113 stop_stepping (ecs);
4114 return;
4115 }
4116
4117 /* This originates from attach_command(). We need to overwrite
4118 the stop_signal here, because some kernels don't ignore a
4119 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4120 See more comments in inferior.h. On the other hand, if we
4121 get a non-SIGSTOP, report it to the user - assume the backend
4122 will handle the SIGSTOP if it should show up later.
4123
4124 Also consider that the attach is complete when we see a
4125 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4126 target extended-remote report it instead of a SIGSTOP
4127 (e.g. gdbserver). We already rely on SIGTRAP being our
4128 signal, so this is no exception.
4129
4130 Also consider that the attach is complete when we see a
4131 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4132 the target to stop all threads of the inferior, in case the
4133 low level attach operation doesn't stop them implicitly. If
4134 they weren't stopped implicitly, then the stub will report a
4135 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4136 other than GDB's request. */
4137 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4138 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4139 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4140 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4141 {
4142 stop_stepping (ecs);
4143 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4144 return;
4145 }
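 /* Illustrative usage sketch (the pid is a placeholder): this is the
 normal end of

 (gdb) attach 1234

 The initial SIGSTOP (or SIGTRAP, or TARGET_SIGNAL_0, per the comment
 above) is swallowed here so the user sees a clean stop rather than a
 spurious signal report. */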
4146
4147 /* See if there is a breakpoint at the current PC. */
4148 ecs->event_thread->control.stop_bpstat
4149 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4150 stop_pc, ecs->ptid);
4151
4152 /* The following is needed in case a breakpoint condition called
4153 a function. */
4154 stop_print_frame = 1;
4155
4156 /* This is where we handle "moribund" watchpoints. Unlike
4157 software breakpoint traps, hardware watchpoint traps are
4158 always distinguishable from random traps. If no high-level
4159 watchpoint is associated with the reported stop data address
4160 anymore, then the bpstat does not explain the signal ---
4161 simply make sure to ignore it if `stopped_by_watchpoint' is
4162 set. */
4163
4164 if (debug_infrun
4165 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4166 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4167 && stopped_by_watchpoint)
4168 fprintf_unfiltered (gdb_stdlog,
4169 "infrun: no user watchpoint explains "
4170 "watchpoint SIGTRAP, ignoring\n");
4171
4172 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4173 at one stage in the past included checks for an inferior
4174 function call's call dummy's return breakpoint. The original
4175 comment, that went with the test, read:
4176
4177 ``End of a stack dummy. Some systems (e.g. Sony news) give
4178 another signal besides SIGTRAP, so check here as well as
4179 above.''
4180
4181 If someone ever tries to get call dummies on a
4182 non-executable stack to work (where the target would stop
4183 with something like a SIGSEGV), then those tests might need
4184 to be re-instated. Given, however, that the tests were only
4185 enabled when momentary breakpoints were not being used, I
4186 suspect that it won't be the case.
4187
4188 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4189 be necessary for call dummies on a non-executable stack on
4190 SPARC. */
4191
4192 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4193 ecs->random_signal
4194 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4195 || stopped_by_watchpoint
4196 || ecs->event_thread->control.trap_expected
4197 || (ecs->event_thread->control.step_range_end
4198 && (ecs->event_thread->control.step_resume_breakpoint
4199 == NULL)));
4200 else
4201 {
4202 ecs->random_signal = !bpstat_explains_signal
4203 (ecs->event_thread->control.stop_bpstat);
4204 if (!ecs->random_signal)
4205 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4206 }
4207 }
4208
4209 /* When we reach this point, we've pretty much decided
4210 that the reason for stopping must've been a random
4211 (unexpected) signal. */
4212
4213 else
4214 ecs->random_signal = 1;
4215
4216 process_event_stop_test:
4217
4218 /* Re-fetch current thread's frame in case we did a
4219 "goto process_event_stop_test" above. */
4220 frame = get_current_frame ();
4221 gdbarch = get_frame_arch (frame);
4222
4223 /* For the program's own signals, act according to
4224 the signal handling tables. */
4225
4226 if (ecs->random_signal)
4227 {
4228 /* Signal not for debugging purposes. */
4229 int printed = 0;
4230 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4231
4232 if (debug_infrun)
4233 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4234 ecs->event_thread->suspend.stop_signal);
4235
4236 stopped_by_random_signal = 1;
4237
4238 if (signal_print[ecs->event_thread->suspend.stop_signal])
4239 {
4240 printed = 1;
4241 target_terminal_ours_for_output ();
4242 print_signal_received_reason
4243 (ecs->event_thread->suspend.stop_signal);
4244 }
4245 /* Always stop on signals if we're either just gaining control
4246 of the program, or the user explicitly requested this thread
4247 to remain stopped. */
4248 if (stop_soon != NO_STOP_QUIETLY
4249 || ecs->event_thread->stop_requested
4250 || (!inf->detaching
4251 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4252 {
4253 stop_stepping (ecs);
4254 return;
4255 }
4256 /* If not going to stop, give terminal back
4257 if we took it away. */
4258 else if (printed)
4259 target_terminal_inferior ();
4260
4261 /* Clear the signal if it should not be passed. */
4262 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4263 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4264
4265 if (ecs->event_thread->prev_pc == stop_pc
4266 && ecs->event_thread->control.trap_expected
4267 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4268 {
4269 /* We were just starting a new sequence, attempting to
4270 single-step off of a breakpoint and expecting a SIGTRAP.
4271 Instead this signal arrives. This signal will take us out
4272 of the stepping range, so GDB needs to remember to resume
4273 stepping off that breakpoint when the signal handler
4274 returns. */
4275 /* To simplify things, "continue" is forced to use the same
4276 code paths as single-step - set a breakpoint at the
4277 signal return address and then, once hit, step off that
4278 breakpoint. */
4279 if (debug_infrun)
4280 fprintf_unfiltered (gdb_stdlog,
4281 "infrun: signal arrived while stepping over "
4282 "breakpoint\n");
4283
4284 insert_hp_step_resume_breakpoint_at_frame (frame);
4285 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4286 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4287 ecs->event_thread->control.trap_expected = 0;
4288 keep_going (ecs);
4289 return;
4290 }
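 /* Illustrative sketch (SIGALRM is just an example signal): this case
 is hit when a signal configured to pass, say via

 (gdb) handle SIGALRM nostop noprint pass

 arrives exactly as GDB single-steps off a breakpoint. The step-resume
 breakpoint planted above makes GDB finish stepping off the original
 breakpoint once the handler returns. */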
4291
4292 if (ecs->event_thread->control.step_range_end != 0
4293 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4294 && (ecs->event_thread->control.step_range_start <= stop_pc
4295 && stop_pc < ecs->event_thread->control.step_range_end)
4296 && frame_id_eq (get_stack_frame_id (frame),
4297 ecs->event_thread->control.step_stack_frame_id)
4298 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4299 {
4300 /* The inferior is about to take a signal that will take it
4301 out of the single step range. Set a breakpoint at the
4302 current PC (which is presumably where the signal handler
4303 will eventually return) and then allow the inferior to
4304 run free.
4305
4306 Note that this is only needed for a signal delivered
4307 while in the single-step range. Nested signals aren't a
4308 problem as they eventually all return. */
4309 if (debug_infrun)
4310 fprintf_unfiltered (gdb_stdlog,
4311 "infrun: signal may take us out of "
4312 "single-step range\n");
4313
4314 insert_hp_step_resume_breakpoint_at_frame (frame);
4315 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4316 ecs->event_thread->control.trap_expected = 0;
4317 keep_going (ecs);
4318 return;
4319 }
4320
4321 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4322 when either there's a nested signal, or when there's a
4323 pending signal enabled just as the signal handler returns
4324 (leaving the inferior at the step-resume-breakpoint without
4325 actually executing it). Either way continue until the
4326 breakpoint is really hit. */
4327 keep_going (ecs);
4328 return;
4329 }
4330
4331 /* Handle cases caused by hitting a breakpoint. */
4332 {
4333 CORE_ADDR jmp_buf_pc;
4334 struct bpstat_what what;
4335
4336 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4337
4338 if (what.call_dummy)
4339 {
4340 stop_stack_dummy = what.call_dummy;
4341 }
4342
4343 /* If we hit an internal event that triggers symbol changes, the
4344 current frame will be invalidated within bpstat_what (e.g., if
4345 we hit an internal solib event). Re-fetch it. */
4346 frame = get_current_frame ();
4347 gdbarch = get_frame_arch (frame);
4348
4349 switch (what.main_action)
4350 {
4351 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4352 /* If we hit the breakpoint at longjmp while stepping, we
4353 install a momentary breakpoint at the target of the
4354 jmp_buf. */
4355
4356 if (debug_infrun)
4357 fprintf_unfiltered (gdb_stdlog,
4358 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4359
4360 ecs->event_thread->stepping_over_breakpoint = 1;
4361
4362 if (what.is_longjmp)
4363 {
4364 if (!gdbarch_get_longjmp_target_p (gdbarch)
4365 || !gdbarch_get_longjmp_target (gdbarch,
4366 frame, &jmp_buf_pc))
4367 {
4368 if (debug_infrun)
4369 fprintf_unfiltered (gdb_stdlog,
4370 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4371 "(!gdbarch_get_longjmp_target)\n");
4372 keep_going (ecs);
4373 return;
4374 }
4375
4376 /* We're going to replace the current step-resume breakpoint
4377 with a longjmp-resume breakpoint. */
4378 delete_step_resume_breakpoint (ecs->event_thread);
4379
4380 /* Insert a breakpoint at resume address. */
4381 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4382 }
4383 else
4384 {
4385 struct symbol *func = get_frame_function (frame);
4386
4387 if (func)
4388 check_exception_resume (ecs, frame, func);
4389 }
4390 keep_going (ecs);
4391 return;
4392
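 /* Illustrative sketch: this is what makes

 (gdb) next

 over a call that ends in longjmp behave sensibly: the internal
 breakpoint on longjmp fires, gdbarch_get_longjmp_target decodes the
 jmp_buf, and the momentary longjmp-resume breakpoint planted in the
 case above stops the step at the corresponding setjmp return point
 (handled by the CLEAR_LONGJMP_RESUME case below) instead of letting
 the program run away. */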
4393 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4394 if (debug_infrun)
4395 fprintf_unfiltered (gdb_stdlog,
4396 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4397
4398 if (what.is_longjmp)
4399 {
4400 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4401 != NULL);
4402 delete_step_resume_breakpoint (ecs->event_thread);
4403 }
4404 else
4405 {
4406 /* There are several cases to consider.
4407
4408 1. The initiating frame no longer exists. In this case
4409 we must stop, because the exception has gone too far.
4410
4411 2. The initiating frame exists, and is the same as the
4412 current frame. We stop, because the exception has been
4413 caught.
4414
4415 3. The initiating frame exists and is different from
4416 the current frame. This means the exception has been
4417 caught beneath the initiating frame, so keep going. */
4418 struct frame_info *init_frame
4419 = frame_find_by_id (ecs->event_thread->initiating_frame);
4420
4421 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4422 != NULL);
4423 delete_exception_resume_breakpoint (ecs->event_thread);
4424
4425 if (init_frame)
4426 {
4427 struct frame_id current_id
4428 = get_frame_id (get_current_frame ());
4429 if (frame_id_eq (current_id,
4430 ecs->event_thread->initiating_frame))
4431 {
4432 /* Case 2. Fall through. */
4433 }
4434 else
4435 {
4436 /* Case 3. */
4437 keep_going (ecs);
4438 return;
4439 }
4440 }
4441
4442 /* For Cases 1 and 2, remove the step-resume breakpoint,
4443 if it exists. */
4444 delete_step_resume_breakpoint (ecs->event_thread);
4445 }
4446
4447 ecs->event_thread->control.stop_step = 1;
4448 print_end_stepping_range_reason ();
4449 stop_stepping (ecs);
4450 return;
4451
4452 case BPSTAT_WHAT_SINGLE:
4453 if (debug_infrun)
4454 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4455 ecs->event_thread->stepping_over_breakpoint = 1;
4456 /* Still need to check other stuff, at least the case
4457 where we are stepping and step out of the right range. */
4458 break;
4459
4460 case BPSTAT_WHAT_STEP_RESUME:
4461 if (debug_infrun)
4462 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4463
4464 delete_step_resume_breakpoint (ecs->event_thread);
4465 if (ecs->event_thread->control.proceed_to_finish
4466 && execution_direction == EXEC_REVERSE)
4467 {
4468 struct thread_info *tp = ecs->event_thread;
4469
4470 /* We are finishing a function in reverse, and just hit
4471 the step-resume breakpoint at the start address of the
4472 function, and we're almost there -- just need to back
4473 up by one more single-step, which should take us back
4474 to the function call. */
4475 tp->control.step_range_start = tp->control.step_range_end = 1;
4476 keep_going (ecs);
4477 return;
4478 }
4479 fill_in_stop_func (gdbarch, ecs);
4480 if (stop_pc == ecs->stop_func_start
4481 && execution_direction == EXEC_REVERSE)
4482 {
4483 /* We are stepping over a function call in reverse, and
4484 just hit the step-resume breakpoint at the start
4485 address of the function. Go back to single-stepping,
4486 which should take us back to the function call. */
4487 ecs->event_thread->stepping_over_breakpoint = 1;
4488 keep_going (ecs);
4489 return;
4490 }
4491 break;
4492
4493 case BPSTAT_WHAT_STOP_NOISY:
4494 if (debug_infrun)
4495 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4496 stop_print_frame = 1;
4497
4498 /* We are about to nuke the step_resume_breakpoint via the
4499 cleanup chain, so no need to worry about it here. */
4500
4501 stop_stepping (ecs);
4502 return;
4503
4504 case BPSTAT_WHAT_STOP_SILENT:
4505 if (debug_infrun)
4506 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4507 stop_print_frame = 0;
4508
4509 /* We are about to nuke the step_resume_breakpoint via the
4510 cleanup chain, so no need to worry about it here. */
4511
4512 stop_stepping (ecs);
4513 return;
4514
4515 case BPSTAT_WHAT_HP_STEP_RESUME:
4516 if (debug_infrun)
4517 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4518
4519 delete_step_resume_breakpoint (ecs->event_thread);
4520 if (ecs->event_thread->step_after_step_resume_breakpoint)
4521 {
4522 /* Back when the step-resume breakpoint was inserted, we
4523 were trying to single-step off a breakpoint. Go back
4524 to doing that. */
4525 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4526 ecs->event_thread->stepping_over_breakpoint = 1;
4527 keep_going (ecs);
4528 return;
4529 }
4530 break;
4531
4532 case BPSTAT_WHAT_KEEP_CHECKING:
4533 break;
4534 }
4535 }
4536
4537 /* We come here if we hit a breakpoint but should not
4538 stop for it. Possibly we also were stepping
4539 and should stop for that. So fall through and
4540 test for stepping. But, if not stepping,
4541 do not stop. */
4542
4543 /* In all-stop mode, if we're currently stepping but have stopped in
4544 some other thread, we need to switch back to the stepped thread. */
4545 if (!non_stop)
4546 {
4547 struct thread_info *tp;
4548
4549 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4550 ecs->event_thread);
4551 if (tp)
4552 {
4553 /* However, if the current thread is blocked on some internal
4554 breakpoint, and we simply need to step over that breakpoint
4555 to get it going again, do that first. */
4556 if ((ecs->event_thread->control.trap_expected
4557 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4558 || ecs->event_thread->stepping_over_breakpoint)
4559 {
4560 keep_going (ecs);
4561 return;
4562 }
4563
4564 /* If the stepping thread exited, then don't try to switch
4565 back and resume it, which could fail in several different
4566 ways depending on the target. Instead, just keep going.
4567
4568 We can find a stepping dead thread in the thread list in
4569 two cases:
4570
4571 - The target supports thread exit events, and when the
4572 target tries to delete the thread from the thread list,
4573 inferior_ptid pointed at the exiting thread. In such
4574 case, calling delete_thread does not really remove the
4575 thread from the list; instead, the thread is left listed,
4576 with 'exited' state.
4577
4578 - The target's debug interface does not support thread
4579 exit events, and so we have no idea whatsoever if the
4580 previously stepping thread is still alive. For that
4581 reason, we need to synchronously query the target
4582 now. */
4583 if (is_exited (tp->ptid)
4584 || !target_thread_alive (tp->ptid))
4585 {
4586 if (debug_infrun)
4587 fprintf_unfiltered (gdb_stdlog,
4588 "infrun: not switching back to "
4589 "stepped thread, it has vanished\n");
4590
4591 delete_thread (tp->ptid);
4592 keep_going (ecs);
4593 return;
4594 }
4595
4596 /* Otherwise, we no longer expect a trap in the current thread.
4597 Clear the trap_expected flag before switching back -- this is
4598 what keep_going would do as well, if we called it. */
4599 ecs->event_thread->control.trap_expected = 0;
4600
4601 if (debug_infrun)
4602 fprintf_unfiltered (gdb_stdlog,
4603 "infrun: switching back to stepped thread\n");
4604
4605 ecs->event_thread = tp;
4606 ecs->ptid = tp->ptid;
4607 context_switch (ecs->ptid);
4608 keep_going (ecs);
4609 return;
4610 }
4611 }
4612
4613 if (ecs->event_thread->control.step_resume_breakpoint)
4614 {
4615 if (debug_infrun)
4616 fprintf_unfiltered (gdb_stdlog,
4617 "infrun: step-resume breakpoint is inserted\n");
4618
4619 /* Having a step-resume breakpoint overrides anything
4620 else having to do with stepping commands until
4621 that breakpoint is reached. */
4622 keep_going (ecs);
4623 return;
4624 }
4625
4626 if (ecs->event_thread->control.step_range_end == 0)
4627 {
4628 if (debug_infrun)
4629 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4630 /* Likewise if we aren't even stepping. */
4631 keep_going (ecs);
4632 return;
4633 }
4634
4635 /* Re-fetch current thread's frame in case the code above caused
4636 the frame cache to be re-initialized, making our FRAME variable
4637 a dangling pointer. */
4638 frame = get_current_frame ();
4639 gdbarch = get_frame_arch (frame);
4640 fill_in_stop_func (gdbarch, ecs);
4641
4642 /* If stepping through a line, keep going if still within it.
4643
4644 Note that step_range_end is the address of the first instruction
4645 beyond the step range, and NOT the address of the last instruction
4646 within it!
4647
4648 Note also that during reverse execution, we may be stepping
4649 through a function epilogue and therefore must detect when
4650 the current-frame changes in the middle of a line. */
4651
4652 if (stop_pc >= ecs->event_thread->control.step_range_start
4653 && stop_pc < ecs->event_thread->control.step_range_end
4654 && (execution_direction != EXEC_REVERSE
4655 || frame_id_eq (get_frame_id (frame),
4656 ecs->event_thread->control.step_frame_id)))
4657 {
4658 if (debug_infrun)
4659 fprintf_unfiltered
4660 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4661 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4662 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4663
4664 /* When stepping backward, stop at beginning of line range
4665 (unless it's the function entry point, in which case
4666 keep going back to the call point). */
4667 if (stop_pc == ecs->event_thread->control.step_range_start
4668 && stop_pc != ecs->stop_func_start
4669 && execution_direction == EXEC_REVERSE)
4670 {
4671 ecs->event_thread->control.stop_step = 1;
4672 print_end_stepping_range_reason ();
4673 stop_stepping (ecs);
4674 }
4675 else
4676 keep_going (ecs);
4677
4678 return;
4679 }
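 /* Illustrative sketch: while a "next" works through a source line that
 spans several instructions, every intermediate SIGTRAP lands in the
 branch above; with "set debug infrun 1" this shows up as repeated
 "stepping inside range [...]" lines until the PC finally leaves the
 [step_range_start, step_range_end) interval. */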
4680
4681 /* We stepped out of the stepping range. */
4682
4683 /* If we are stepping at the source level and entered the runtime
4684 loader dynamic symbol resolution code...
4685
4686 EXEC_FORWARD: we keep on single stepping until we exit the run
4687 time loader code and reach the callee's address.
4688
4689 EXEC_REVERSE: we've already executed the callee (backward), and
4690 the runtime loader code is handled just like any other
4691 undebuggable function call. Now we need only keep stepping
4692 backward through the trampoline code, and that's handled further
4693 down, so there is nothing for us to do here. */
4694
4695 if (execution_direction != EXEC_REVERSE
4696 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4697 && in_solib_dynsym_resolve_code (stop_pc))
4698 {
4699 CORE_ADDR pc_after_resolver =
4700 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4701
4702 if (debug_infrun)
4703 fprintf_unfiltered (gdb_stdlog,
4704 "infrun: stepped into dynsym resolve code\n");
4705
4706 if (pc_after_resolver)
4707 {
4708 /* Set up a step-resume breakpoint at the address
4709 indicated by SKIP_SOLIB_RESOLVER. */
4710 struct symtab_and_line sr_sal;
4711
4712 init_sal (&sr_sal);
4713 sr_sal.pc = pc_after_resolver;
4714 sr_sal.pspace = get_frame_program_space (frame);
4715
4716 insert_step_resume_breakpoint_at_sal (gdbarch,
4717 sr_sal, null_frame_id);
4718 }
4719
4720 keep_going (ecs);
4721 return;
4722 }
4723
4724 if (ecs->event_thread->control.step_range_end != 1
4725 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4726 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4727 && get_frame_type (frame) == SIGTRAMP_FRAME)
4728 {
4729 if (debug_infrun)
4730 fprintf_unfiltered (gdb_stdlog,
4731 "infrun: stepped into signal trampoline\n");
4732 /* The inferior, while doing a "step" or "next", has ended up in
4733 a signal trampoline (either by a signal being delivered or by
4734 the signal handler returning). Just single-step until the
4735 inferior leaves the trampoline (either by calling the handler
4736 or returning). */
4737 keep_going (ecs);
4738 return;
4739 }
4740
4741 /* Check for subroutine calls. The check for the current frame
4742 equalling the step ID is not necessary - the check of the
4743 previous frame's ID is sufficient - but it is a common case and
4744 cheaper than checking the previous frame's ID.
4745
4746 NOTE: frame_id_eq will never report two invalid frame IDs as
4747 being equal, so to get into this block, both the current and
4748 previous frame must have valid frame IDs. */
4749 /* The outer_frame_id check is a heuristic to detect stepping
4750 through startup code. If we step over an instruction which
4751 sets the stack pointer from an invalid value to a valid value,
4752 we may detect that as a subroutine call from the mythical
4753 "outermost" function. This could be fixed by marking
4754 outermost frames as !stack_p,code_p,special_p. Then the
4755 initial outermost frame, before sp was valid, would
4756 have code_addr == &_start. See the comment in frame_id_eq
4757 for more. */
4758 if (!frame_id_eq (get_stack_frame_id (frame),
4759 ecs->event_thread->control.step_stack_frame_id)
4760 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4761 ecs->event_thread->control.step_stack_frame_id)
4762 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4763 outer_frame_id)
4764 || step_start_function != find_pc_function (stop_pc))))
4765 {
4766 CORE_ADDR real_stop_pc;
4767
4768 if (debug_infrun)
4769 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4770
4771 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4772 || ((ecs->event_thread->control.step_range_end == 1)
4773 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4774 ecs->stop_func_start)))
4775 {
4776 /* I presume that step_over_calls is only 0 when we're
4777 supposed to be stepping at the assembly language level
4778 ("stepi"). Just stop. */
4779 /* Also, maybe we just did a "nexti" inside a prologue, so we
4780 thought it was a subroutine call but it was not. Stop as
4781 well. FENN */
4782 /* And this works the same backward as frontward. MVS */
4783 ecs->event_thread->control.stop_step = 1;
4784 print_end_stepping_range_reason ();
4785 stop_stepping (ecs);
4786 return;
4787 }
4788
4789 /* Reverse stepping through solib trampolines. */
4790
4791 if (execution_direction == EXEC_REVERSE
4792 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4793 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4794 || (ecs->stop_func_start == 0
4795 && in_solib_dynsym_resolve_code (stop_pc))))
4796 {
4797 /* Any solib trampoline code can be handled in reverse
4798 by simply continuing to single-step. We have already
4799 executed the solib function (backwards), and a few
4800 steps will take us back through the trampoline to the
4801 caller. */
4802 keep_going (ecs);
4803 return;
4804 }
4805
4806 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4807 {
4808 /* We're doing a "next".
4809
4810 Normal (forward) execution: set a breakpoint at the
4811 callee's return address (the address at which the caller
4812 will resume).
4813
4814 Reverse (backward) execution. set the step-resume
4815 breakpoint at the start of the function that we just
4816 stepped into (backwards), and continue to there. When we
4817 get there, we'll need to single-step back to the caller. */
4818
4819 if (execution_direction == EXEC_REVERSE)
4820 {
4821 struct symtab_and_line sr_sal;
4822
4823 /* Normal function call return (static or dynamic). */
4824 init_sal (&sr_sal);
4825 sr_sal.pc = ecs->stop_func_start;
4826 sr_sal.pspace = get_frame_program_space (frame);
4827 insert_step_resume_breakpoint_at_sal (gdbarch,
4828 sr_sal, null_frame_id);
4829 }
4830 else
4831 insert_step_resume_breakpoint_at_caller (frame);
4832
4833 keep_going (ecs);
4834 return;
4835 }
4836
4837 /* If we are in a function call trampoline (a stub between the
4838 calling routine and the real function), locate the real
4839 function. That's what tells us (a) whether we want to step
4840 into it at all, and (b) what prologue we want to run to the
4841 end of, if we do step into it. */
4842 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4843 if (real_stop_pc == 0)
4844 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4845 if (real_stop_pc != 0)
4846 ecs->stop_func_start = real_stop_pc;
4847
4848 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4849 {
4850 struct symtab_and_line sr_sal;
4851
4852 init_sal (&sr_sal);
4853 sr_sal.pc = ecs->stop_func_start;
4854 sr_sal.pspace = get_frame_program_space (frame);
4855
4856 insert_step_resume_breakpoint_at_sal (gdbarch,
4857 sr_sal, null_frame_id);
4858 keep_going (ecs);
4859 return;
4860 }
4861
4862 /* If we have line number information for the function we are
4863 thinking of stepping into, step into it.
4864
4865 If there are several symtabs at that PC (e.g. with include
4866 files), we just want to know whether *any* of them have line
4867 numbers. find_pc_line handles this. */
4868 {
4869 struct symtab_and_line tmp_sal;
4870
4871 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4872 if (tmp_sal.line != 0)
4873 {
4874 if (execution_direction == EXEC_REVERSE)
4875 handle_step_into_function_backward (gdbarch, ecs);
4876 else
4877 handle_step_into_function (gdbarch, ecs);
4878 return;
4879 }
4880 }
4881
4882 /* If we have no line number information and step-stop-if-no-debug
4883 is set, we stop stepping so that the user has a chance to switch
4884 to assembly mode. */
4885 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4886 && step_stop_if_no_debug)
4887 {
4888 ecs->event_thread->control.stop_step = 1;
4889 print_end_stepping_range_reason ();
4890 stop_stepping (ecs);
4891 return;
4892 }
4893
4894 if (execution_direction == EXEC_REVERSE)
4895 {
4896 /* Set a breakpoint at callee's start address.
4897 From there we can step once and be back in the caller. */
4898 struct symtab_and_line sr_sal;
4899
4900 init_sal (&sr_sal);
4901 sr_sal.pc = ecs->stop_func_start;
4902 sr_sal.pspace = get_frame_program_space (frame);
4903 insert_step_resume_breakpoint_at_sal (gdbarch,
4904 sr_sal, null_frame_id);
4905 }
4906 else
4907 /* Set a breakpoint at callee's return address (the address
4908 at which the caller will resume). */
4909 insert_step_resume_breakpoint_at_caller (frame);
4910
4911 keep_going (ecs);
4912 return;
4913 }
4914
4915 /* Reverse stepping through solib trampolines. */
4916
4917 if (execution_direction == EXEC_REVERSE
4918 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4919 {
4920 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4921 || (ecs->stop_func_start == 0
4922 && in_solib_dynsym_resolve_code (stop_pc)))
4923 {
4924 /* Any solib trampoline code can be handled in reverse
4925 by simply continuing to single-step. We have already
4926 executed the solib function (backwards), and a few
4927 steps will take us back through the trampoline to the
4928 caller. */
4929 keep_going (ecs);
4930 return;
4931 }
4932 else if (in_solib_dynsym_resolve_code (stop_pc))
4933 {
4934 /* Stepped backward into the solib dynsym resolver.
4935 Set a breakpoint at its start and continue, then
4936 one more step will take us out. */
4937 struct symtab_and_line sr_sal;
4938
4939 init_sal (&sr_sal);
4940 sr_sal.pc = ecs->stop_func_start;
4941 sr_sal.pspace = get_frame_program_space (frame);
4942 insert_step_resume_breakpoint_at_sal (gdbarch,
4943 sr_sal, null_frame_id);
4944 keep_going (ecs);
4945 return;
4946 }
4947 }
4948
4949 /* If we're in the return path from a shared library trampoline,
4950 we want to proceed through the trampoline when stepping. */
4951 if (gdbarch_in_solib_return_trampoline (gdbarch,
4952 stop_pc, ecs->stop_func_name))
4953 {
4954 /* Determine where this trampoline returns. */
4955 CORE_ADDR real_stop_pc;
4956
4957 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4958
4959 if (debug_infrun)
4960 fprintf_unfiltered (gdb_stdlog,
4961 "infrun: stepped into solib return tramp\n");
4962
4963 /* Only proceed through if we know where it's going. */
4964 if (real_stop_pc)
4965 {
4966 /* And put the step-breakpoint there and go until there. */
4967 struct symtab_and_line sr_sal;
4968
4969 init_sal (&sr_sal); /* initialize to zeroes */
4970 sr_sal.pc = real_stop_pc;
4971 sr_sal.section = find_pc_overlay (sr_sal.pc);
4972 sr_sal.pspace = get_frame_program_space (frame);
4973
4974 /* Do not specify what the fp should be when we stop since
4975 on some machines the prologue is where the new fp value
4976 is established. */
4977 insert_step_resume_breakpoint_at_sal (gdbarch,
4978 sr_sal, null_frame_id);
4979
4980 /* Restart without fiddling with the step ranges or
4981 other state. */
4982 keep_going (ecs);
4983 return;
4984 }
4985 }
4986
4987 stop_pc_sal = find_pc_line (stop_pc, 0);
4988
4989 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4990 the trampoline processing logic; however, there are some trampolines
4991 that have no names, so we should do trampoline handling first. */
4992 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4993 && ecs->stop_func_name == NULL
4994 && stop_pc_sal.line == 0)
4995 {
4996 if (debug_infrun)
4997 fprintf_unfiltered (gdb_stdlog,
4998 "infrun: stepped into undebuggable function\n");
4999
5000 /* The inferior just stepped into, or returned to, an
5001 undebuggable function (where there is no debugging information
5002 and no line number corresponding to the address where the
5003 inferior stopped). Since we want to skip this kind of code,
5004 we keep going until the inferior returns from this
5005 function - unless the user has asked us not to (via
5006 set step-mode) or we no longer know how to get back
5007 to the call site. */
5008 if (step_stop_if_no_debug
5009 || !frame_id_p (frame_unwind_caller_id (frame)))
5010 {
5011 /* If we have no line number information and step-stop-if-no-debug
5012 is set, we stop stepping so that the user has a chance to
5013 switch to assembly mode. */
5014 ecs->event_thread->control.stop_step = 1;
5015 print_end_stepping_range_reason ();
5016 stop_stepping (ecs);
5017 return;
5018 }
5019 else
5020 {
5021 /* Set a breakpoint at callee's return address (the address
5022 at which the caller will resume). */
5023 insert_step_resume_breakpoint_at_caller (frame);
5024 keep_going (ecs);
5025 return;
5026 }
5027 }
5028
5029 if (ecs->event_thread->control.step_range_end == 1)
5030 {
5031 /* It is stepi or nexti. We always want to stop stepping after
5032 one instruction. */
5033 if (debug_infrun)
5034 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5035 ecs->event_thread->control.stop_step = 1;
5036 print_end_stepping_range_reason ();
5037 stop_stepping (ecs);
5038 return;
5039 }
5040
5041 if (stop_pc_sal.line == 0)
5042 {
5043 /* We have no line number information. That means we should stop
5044 stepping (does this always happen right after one instruction,
5045 when we do "s" in a function with no line numbers,
5046 or can this happen as a result of a return or longjmp?). */
5047 if (debug_infrun)
5048 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5049 ecs->event_thread->control.stop_step = 1;
5050 print_end_stepping_range_reason ();
5051 stop_stepping (ecs);
5052 return;
5053 }
5054
5055 /* Look for "calls" to inlined functions, part one. If the inline
5056 frame machinery detected some skipped call sites, we have entered
5057 a new inline function. */
5058
5059 if (frame_id_eq (get_frame_id (get_current_frame ()),
5060 ecs->event_thread->control.step_frame_id)
5061 && inline_skipped_frames (ecs->ptid))
5062 {
5063 struct symtab_and_line call_sal;
5064
5065 if (debug_infrun)
5066 fprintf_unfiltered (gdb_stdlog,
5067 "infrun: stepped into inlined function\n");
5068
5069 find_frame_sal (get_current_frame (), &call_sal);
5070
5071 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5072 {
5073 /* For "step", we're going to stop. But if the call site
5074 for this inlined function is on the same source line as
5075 we were previously stepping, go down into the function
5076 first. Otherwise stop at the call site. */
5077
5078 if (call_sal.line == ecs->event_thread->current_line
5079 && call_sal.symtab == ecs->event_thread->current_symtab)
5080 step_into_inline_frame (ecs->ptid);
5081
5082 ecs->event_thread->control.stop_step = 1;
5083 print_end_stepping_range_reason ();
5084 stop_stepping (ecs);
5085 return;
5086 }
5087 else
5088 {
5089 /* For "next", we should stop at the call site if it is on a
5090 different source line. Otherwise continue through the
5091 inlined function. */
5092 if (call_sal.line == ecs->event_thread->current_line
5093 && call_sal.symtab == ecs->event_thread->current_symtab)
5094 keep_going (ecs);
5095 else
5096 {
5097 ecs->event_thread->control.stop_step = 1;
5098 print_end_stepping_range_reason ();
5099 stop_stepping (ecs);
5100 }
5101 return;
5102 }
5103 }
5104
5105 /* Look for "calls" to inlined functions, part two. If we are still
5106 in the same real function we were stepping through, but we have
5107 to go further up to find the exact frame ID, we are stepping
5108 through a more inlined call beyond its call site. */
5109
5110 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5111 && !frame_id_eq (get_frame_id (get_current_frame ()),
5112 ecs->event_thread->control.step_frame_id)
5113 && stepped_in_from (get_current_frame (),
5114 ecs->event_thread->control.step_frame_id))
5115 {
5116 if (debug_infrun)
5117 fprintf_unfiltered (gdb_stdlog,
5118 "infrun: stepping through inlined function\n");
5119
5120 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5121 keep_going (ecs);
5122 else
5123 {
5124 ecs->event_thread->control.stop_step = 1;
5125 print_end_stepping_range_reason ();
5126 stop_stepping (ecs);
5127 }
5128 return;
5129 }
5130
5131 if ((stop_pc == stop_pc_sal.pc)
5132 && (ecs->event_thread->current_line != stop_pc_sal.line
5133 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5134 {
5135 /* We are at the start of a different line. So stop. Note that
5136 we don't stop if we step into the middle of a different line.
5137 That is said to make things like for (;;) statements work
5138 better. */
5139 if (debug_infrun)
5140 fprintf_unfiltered (gdb_stdlog,
5141 "infrun: stepped to a different line\n");
5142 ecs->event_thread->control.stop_step = 1;
5143 print_end_stepping_range_reason ();
5144 stop_stepping (ecs);
5145 return;
5146 }
5147
5148 /* We aren't done stepping.
5149
5150 Optimize by setting the stepping range to the line.
5151 (We might not be in the original line, but if we entered a
5152 new line in mid-statement, we continue stepping. This makes
5153 things like for(;;) statements work better.) */
5154
5155 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5156 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5157 set_step_info (frame, stop_pc_sal);
5158
5159 if (debug_infrun)
5160 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5161 keep_going (ecs);
5162 }
5163
5164 /* Is thread TP in the middle of single-stepping? */
5165
5166 static int
5167 currently_stepping (struct thread_info *tp)
5168 {
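/* A thread counts as stepping here if any of these holds:
   - it has an active step range and is not just running to a
     step-resume breakpoint;
   - it is stepping over a breakpoint (trap_expected, set in
     keep_going); or
   - a software watchpoint is in use, which forces single-stepping
     (bpstat_should_step).  */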
5169 return ((tp->control.step_range_end
5170 && tp->control.step_resume_breakpoint == NULL)
5171 || tp->control.trap_expected
5172 || bpstat_should_step ());
5173 }
5174
5175 /* Returns true if any thread *but* the one passed in "data" is in the
5176 middle of stepping or of handling a "next". */
5177
5178 static int
5179 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5180 {
5181 if (tp == data)
5182 return 0;
5183
5184 return (tp->control.step_range_end
5185 || tp->control.trap_expected);
5186 }
5187
5188 /* Inferior has stepped into a subroutine call with source code that
5189 we should not step over. Step to the first line of code in
5190 it. */
5191
5192 static void
5193 handle_step_into_function (struct gdbarch *gdbarch,
5194 struct execution_control_state *ecs)
5195 {
5196 struct symtab *s;
5197 struct symtab_and_line stop_func_sal, sr_sal;
5198
5199 fill_in_stop_func (gdbarch, ecs);
5200
5201 s = find_pc_symtab (stop_pc);
5202 if (s && s->language != language_asm)
5203 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5204 ecs->stop_func_start);
5205
5206 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5207 /* Use the step_resume_break to step until the end of the prologue,
5208 even if that involves jumps (as it seems to on the vax under
5209 4.2). */
5210 /* If the prologue ends in the middle of a source line, continue to
5211 the end of that source line (if it is still within the function).
5212 Otherwise, just go to end of prologue. */
5213 if (stop_func_sal.end
5214 && stop_func_sal.pc != ecs->stop_func_start
5215 && stop_func_sal.end < ecs->stop_func_end)
5216 ecs->stop_func_start = stop_func_sal.end;
5217
5218 /* Architectures which require breakpoint adjustment might not be able
5219 to place a breakpoint at the computed address. If so, the test
5220 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5221 ecs->stop_func_start to an address at which a breakpoint may be
5222 legitimately placed.
5223
5224 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5225 made, GDB will enter an infinite loop when stepping through
5226 optimized code consisting of VLIW instructions which contain
5227 subinstructions corresponding to different source lines. On
5228 FR-V, it's not permitted to place a breakpoint on any but the
5229 first subinstruction of a VLIW instruction. When a breakpoint is
5230 set, GDB will adjust the breakpoint address to the beginning of
5231 the VLIW instruction. Thus, we need to make the corresponding
5232 adjustment here when computing the stop address. */
5233
5234 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5235 {
5236 ecs->stop_func_start
5237 = gdbarch_adjust_breakpoint_address (gdbarch,
5238 ecs->stop_func_start);
5239 }
5240
5241 if (ecs->stop_func_start == stop_pc)
5242 {
5243 /* We are already there: stop now. */
5244 ecs->event_thread->control.stop_step = 1;
5245 print_end_stepping_range_reason ();
5246 stop_stepping (ecs);
5247 return;
5248 }
5249 else
5250 {
5251 /* Put the step-breakpoint there and go until there. */
5252 init_sal (&sr_sal); /* initialize to zeroes */
5253 sr_sal.pc = ecs->stop_func_start;
5254 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5255 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5256
5257 /* Do not specify what the fp should be when we stop since on
5258 some machines the prologue is where the new fp value is
5259 established. */
5260 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5261
5262 /* And make sure stepping stops right away then. */
5263 ecs->event_thread->control.step_range_end
5264 = ecs->event_thread->control.step_range_start;
5265 }
5266 keep_going (ecs);
5267 }
5268
5269 /* Inferior has stepped backward into a subroutine call with source
5270 code that we should not step over. Step to the beginning of the
5271 last line of code in it. */
5272
5273 static void
5274 handle_step_into_function_backward (struct gdbarch *gdbarch,
5275 struct execution_control_state *ecs)
5276 {
5277 struct symtab *s;
5278 struct symtab_and_line stop_func_sal;
5279
5280 fill_in_stop_func (gdbarch, ecs);
5281
5282 s = find_pc_symtab (stop_pc);
5283 if (s && s->language != language_asm)
5284 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5285 ecs->stop_func_start);
5286
5287 stop_func_sal = find_pc_line (stop_pc, 0);
5288
5289 /* OK, we're just going to keep stepping here. */
5290 if (stop_func_sal.pc == stop_pc)
5291 {
5292 /* We're there already. Just stop stepping now. */
5293 ecs->event_thread->control.stop_step = 1;
5294 print_end_stepping_range_reason ();
5295 stop_stepping (ecs);
5296 }
5297 else
5298 {
5299 /* Else just reset the step range and keep going.
5300 No step-resume breakpoint; they don't work for
5301 epilogues, which can have multiple entry paths. */
5302 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5303 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5304 keep_going (ecs);
5305 }
5306 return;
5307 }
5308
5309 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5310 This is used both to step over functions and to skip over code. */
5311
5312 static void
5313 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5314 struct symtab_and_line sr_sal,
5315 struct frame_id sr_id,
5316 enum bptype sr_type)
5317 {
5318 /* There should never be more than one step-resume or longjmp-resume
5319 breakpoint per thread, so we should never be setting a new
5320 step_resume_breakpoint when one is already active. */
5321 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5322 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5323
5324 if (debug_infrun)
5325 fprintf_unfiltered (gdb_stdlog,
5326 "infrun: inserting step-resume breakpoint at %s\n",
5327 paddress (gdbarch, sr_sal.pc));
5328
5329 inferior_thread ()->control.step_resume_breakpoint
5330 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5331 }
5332
5333 void
5334 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5335 struct symtab_and_line sr_sal,
5336 struct frame_id sr_id)
5337 {
5338 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5339 sr_sal, sr_id,
5340 bp_step_resume);
5341 }
5342
5343 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5344 This is used to skip a potential signal handler.
5345
5346 This is called with the interrupted function's frame. The signal
5347 handler, when it returns, will resume the interrupted function at
5348 RETURN_FRAME.pc. */
5349
5350 static void
5351 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5352 {
5353 struct symtab_and_line sr_sal;
5354 struct gdbarch *gdbarch;
5355
5356 gdb_assert (return_frame != NULL);
5357 init_sal (&sr_sal); /* initialize to zeros */
5358
5359 gdbarch = get_frame_arch (return_frame);
5360 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5361 sr_sal.section = find_pc_overlay (sr_sal.pc);
5362 sr_sal.pspace = get_frame_program_space (return_frame);
5363
5364 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5365 get_stack_frame_id (return_frame),
5366 bp_hp_step_resume);
5367 }
5368
5369 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5370 is used to skip a function after stepping into it (for "next" or if
5371 the called function has no debugging information).
5372
5373 The current function has almost always been reached by single
5374 stepping a call or return instruction. NEXT_FRAME belongs to the
5375 current function, and the breakpoint will be set at the caller's
5376 resume address.
5377
5378 This is a separate function rather than reusing
5379 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5380 get_prev_frame, which may stop prematurely (see the implementation
5381 of frame_unwind_caller_id for an example). */
5382
5383 static void
5384 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5385 {
5386 struct symtab_and_line sr_sal;
5387 struct gdbarch *gdbarch;
5388
5389 /* We shouldn't have gotten here if we don't know where the call site
5390 is. */
5391 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5392
5393 init_sal (&sr_sal); /* initialize to zeros */
5394
5395 gdbarch = frame_unwind_caller_arch (next_frame);
5396 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5397 frame_unwind_caller_pc (next_frame));
5398 sr_sal.section = find_pc_overlay (sr_sal.pc);
5399 sr_sal.pspace = frame_unwind_program_space (next_frame);
5400
5401 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5402 frame_unwind_caller_id (next_frame));
5403 }
5404
5405 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5406 new breakpoint at the target of a jmp_buf. The handling of
5407 longjmp-resume uses the same mechanisms used for handling
5408 "step-resume" breakpoints. */
5409
5410 static void
5411 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5412 {
5413 /* There should never be more than one step-resume or longjmp-resume
5414 breakpoint per thread, so we should never be setting a new
5415 longjmp_resume_breakpoint when one is already active. */
5416 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5417
5418 if (debug_infrun)
5419 fprintf_unfiltered (gdb_stdlog,
5420 "infrun: inserting longjmp-resume breakpoint at %s\n",
5421 paddress (gdbarch, pc));
5422
5423 inferior_thread ()->control.step_resume_breakpoint =
5424 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5425 }
5426
5427 /* Insert an exception resume breakpoint. TP is the thread throwing
5428 the exception. The block B is the block of the unwinder debug hook
5429 function. FRAME is the frame corresponding to the call to this
5430 function. SYM is the symbol of the function argument holding the
5431 target PC of the exception. */
5432
5433 static void
5434 insert_exception_resume_breakpoint (struct thread_info *tp,
5435 struct block *b,
5436 struct frame_info *frame,
5437 struct symbol *sym)
5438 {
5439 struct gdb_exception e;
5440
5441 /* We want to ignore errors here. */
5442 TRY_CATCH (e, RETURN_MASK_ERROR)
5443 {
5444 struct symbol *vsym;
5445 struct value *value;
5446 CORE_ADDR handler;
5447 struct breakpoint *bp;
5448
5449 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5450 value = read_var_value (vsym, frame);
5451 /* If the value was optimized out, revert to the old behavior. */
5452 if (! value_optimized_out (value))
5453 {
5454 handler = value_as_address (value);
5455
5456 if (debug_infrun)
5457 fprintf_unfiltered (gdb_stdlog,
5458 "infrun: exception resume at %lx\n",
5459 (unsigned long) handler);
5460
5461 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5462 handler, bp_exception_resume);
5463 bp->thread = tp->num;
5464 inferior_thread ()->control.exception_resume_breakpoint = bp;
5465 }
5466 }
5467 }
5468
5469 /* This is called when an exception has been intercepted. Check to
5470 see whether the exception's destination is of interest, and if so,
5471 set an exception resume breakpoint there. */
5472
5473 static void
5474 check_exception_resume (struct execution_control_state *ecs,
5475 struct frame_info *frame, struct symbol *func)
5476 {
5477 struct gdb_exception e;
5478
5479 TRY_CATCH (e, RETURN_MASK_ERROR)
5480 {
5481 struct block *b;
5482 struct dict_iterator iter;
5483 struct symbol *sym;
5484 int argno = 0;
5485
5486 /* The exception breakpoint is a thread-specific breakpoint on
5487 the unwinder's debug hook, declared as:
5488
5489 void _Unwind_DebugHook (void *cfa, void *handler);
5490
5491 The CFA argument indicates the frame to which control is
5492 about to be transferred. HANDLER is the destination PC.
5493
5494 We ignore the CFA and set a temporary breakpoint at HANDLER.
5495 This is not extremely efficient but it avoids issues in gdb
5496 with computing the DWARF CFA, and it also works even in weird
5497 cases such as throwing an exception from inside a signal
5498 handler. */
5499
5500 b = SYMBOL_BLOCK_VALUE (func);
5501 ALL_BLOCK_SYMBOLS (b, iter, sym)
5502 {
5503 if (!SYMBOL_IS_ARGUMENT (sym))
5504 continue;
5505
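/* The first argument is the CFA, which we skip; the breakpoint goes
   on the value of the second argument, HANDLER.  */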
5506 if (argno == 0)
5507 ++argno;
5508 else
5509 {
5510 insert_exception_resume_breakpoint (ecs->event_thread,
5511 b, frame, sym);
5512 break;
5513 }
5514 }
5515 }
5516 }
5517
5518 static void
5519 stop_stepping (struct execution_control_state *ecs)
5520 {
5521 if (debug_infrun)
5522 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5523
5524 /* Let callers know we don't want to wait for the inferior anymore. */
5525 ecs->wait_some_more = 0;
5526 }
5527
5528 /* This function handles various cases where we need to continue
5529 waiting for the inferior. */
5530 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5531
5532 static void
5533 keep_going (struct execution_control_state *ecs)
5534 {
5535 /* Make sure normal_stop is called if we get a QUIT handled before
5536 reaching resume. */
5537 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5538
5539 /* Save the pc before execution, to compare with pc after stop. */
5540 ecs->event_thread->prev_pc
5541 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5542
5543 /* If we did not do break;, it means we should keep running the
5544 inferior and not return to the debugger. */
5545
5546 if (ecs->event_thread->control.trap_expected
5547 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5548 {
5549 /* We took a signal (which we are supposed to pass through to
5550 the inferior, else we'd not get here) and we haven't yet
5551 gotten our trap. Simply continue. */
5552
5553 discard_cleanups (old_cleanups);
5554 resume (currently_stepping (ecs->event_thread),
5555 ecs->event_thread->suspend.stop_signal);
5556 }
5557 else
5558 {
5559 /* Either the trap was not expected, but we are continuing
5560 anyway (the user asked that this signal be passed to the
5561 child)
5562 -- or --
5563 The signal was SIGTRAP, i.e., it was our signal, but we
5564 decided we should resume from it.
5565
5566 We're going to run this baby now!
5567
5568 Note that insert_breakpoints won't try to re-insert
5569 already inserted breakpoints. Therefore, we don't
5570 care if breakpoints were already inserted, or not. */
5571
5572 if (ecs->event_thread->stepping_over_breakpoint)
5573 {
5574 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5575
5576 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5577 /* Since we can't do a displaced step, we have to remove
5578 the breakpoint while we step it. To keep things
5579 simple, we remove them all. */
5580 remove_breakpoints ();
5581 }
5582 else
5583 {
5584 struct gdb_exception e;
5585
5586 /* Stop stepping when inserting breakpoints
5587 has failed. */
5588 TRY_CATCH (e, RETURN_MASK_ERROR)
5589 {
5590 insert_breakpoints ();
5591 }
5592 if (e.reason < 0)
5593 {
5594 exception_print (gdb_stderr, e);
5595 stop_stepping (ecs);
5596 return;
5597 }
5598 }
5599
5600 ecs->event_thread->control.trap_expected
5601 = ecs->event_thread->stepping_over_breakpoint;
5602
5603 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5604 specifies that such a signal should be delivered to the
5605 target program).
5606
5607 Typically, this would occur when a user is debugging a
5608 target monitor on a simulator: the target monitor sets a
5609 breakpoint; the simulator encounters this break-point and
5610 halts the simulation, handing control to GDB; GDB, noting
5611 that the break-point isn't valid, returns control back to the
5612 simulator; the simulator then delivers the hardware
5613 equivalent of a SIGNAL_TRAP to the program being debugged. */
5614
5615 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5616 && !signal_program[ecs->event_thread->suspend.stop_signal])
5617 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5618
5619 discard_cleanups (old_cleanups);
5620 resume (currently_stepping (ecs->event_thread),
5621 ecs->event_thread->suspend.stop_signal);
5622 }
5623
5624 prepare_to_wait (ecs);
5625 }
5626
5627 /* This function normally comes after a resume, before
5628 handle_inferior_event exits. It takes care of any last bits of
5629 housekeeping, and sets the all-important wait_some_more flag. */
5630
5631 static void
5632 prepare_to_wait (struct execution_control_state *ecs)
5633 {
5634 if (debug_infrun)
5635 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5636
5637 /* This is the old end of the while loop. Let everybody know we
5638 want to wait for the inferior some more and get called again
5639 soon. */
5640 ecs->wait_some_more = 1;
5641 }
5642
5643 /* Several print_*_reason functions to print why the inferior has stopped.
5644 We always print something when the inferior exits, or receives a signal.
5645 The rest of the cases are dealt with later on in normal_stop and
5646 print_it_typical. Ideally there should be a call to one of these
5647 print_*_reason functions from handle_inferior_event each time
5648 stop_stepping is called. */
5649
5650 /* Print why the inferior has stopped.
5651 We are done with a step/next/si/ni command; print why the inferior has
5652 stopped. The CLI prints nothing here; the MI stop reason is emitted only
5653 if we are not in the middle of doing a "step n" operation for n > 1. */
5654
5655 static void
5656 print_end_stepping_range_reason (void)
5657 {
5658 if ((!inferior_thread ()->step_multi
5659 || !inferior_thread ()->control.stop_step)
5660 && ui_out_is_mi_like_p (current_uiout))
5661 ui_out_field_string (current_uiout, "reason",
5662 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5663 }
5664
5665 /* The inferior was terminated by a signal; print why it stopped. */
5666
5667 static void
5668 print_signal_exited_reason (enum target_signal siggnal)
5669 {
5670 struct ui_out *uiout = current_uiout;
5671
5672 annotate_signalled ();
5673 if (ui_out_is_mi_like_p (uiout))
5674 ui_out_field_string
5675 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5676 ui_out_text (uiout, "\nProgram terminated with signal ");
5677 annotate_signal_name ();
5678 ui_out_field_string (uiout, "signal-name",
5679 target_signal_to_name (siggnal));
5680 annotate_signal_name_end ();
5681 ui_out_text (uiout, ", ");
5682 annotate_signal_string ();
5683 ui_out_field_string (uiout, "signal-meaning",
5684 target_signal_to_string (siggnal));
5685 annotate_signal_string_end ();
5686 ui_out_text (uiout, ".\n");
5687 ui_out_text (uiout, "The program no longer exists.\n");
5688 }
5689
5690 /* The inferior program is finished; print why it stopped. */
5691
5692 static void
5693 print_exited_reason (int exitstatus)
5694 {
5695 struct inferior *inf = current_inferior ();
5696 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5697 struct ui_out *uiout = current_uiout;
5698
5699 annotate_exited (exitstatus);
5700 if (exitstatus)
5701 {
5702 if (ui_out_is_mi_like_p (uiout))
5703 ui_out_field_string (uiout, "reason",
5704 async_reason_lookup (EXEC_ASYNC_EXITED));
5705 ui_out_text (uiout, "[Inferior ");
5706 ui_out_text (uiout, plongest (inf->num));
5707 ui_out_text (uiout, " (");
5708 ui_out_text (uiout, pidstr);
5709 ui_out_text (uiout, ") exited with code ");
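/* Note that the exit status is printed in octal (an exit code of 10
   shows up as 012), matching the "0%o" format below.  */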
5710 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5711 ui_out_text (uiout, "]\n");
5712 }
5713 else
5714 {
5715 if (ui_out_is_mi_like_p (uiout))
5716 ui_out_field_string
5717 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5718 ui_out_text (uiout, "[Inferior ");
5719 ui_out_text (uiout, plongest (inf->num));
5720 ui_out_text (uiout, " (");
5721 ui_out_text (uiout, pidstr);
5722 ui_out_text (uiout, ") exited normally]\n");
5723 }
5724 /* Support the --return-child-result option. */
5725 return_child_result_value = exitstatus;
5726 }
5727
5728 /* Signal received; print why the inferior has stopped. We only get
5729 here when the signal table says this signal should be printed. */
5730
5731 static void
5732 print_signal_received_reason (enum target_signal siggnal)
5733 {
5734 struct ui_out *uiout = current_uiout;
5735
5736 annotate_signal ();
5737
5738 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5739 {
5740 struct thread_info *t = inferior_thread ();
5741
5742 ui_out_text (uiout, "\n[");
5743 ui_out_field_string (uiout, "thread-name",
5744 target_pid_to_str (t->ptid));
5745 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5746 ui_out_text (uiout, " stopped");
5747 }
5748 else
5749 {
5750 ui_out_text (uiout, "\nProgram received signal ");
5751 annotate_signal_name ();
5752 if (ui_out_is_mi_like_p (uiout))
5753 ui_out_field_string
5754 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5755 ui_out_field_string (uiout, "signal-name",
5756 target_signal_to_name (siggnal));
5757 annotate_signal_name_end ();
5758 ui_out_text (uiout, ", ");
5759 annotate_signal_string ();
5760 ui_out_field_string (uiout, "signal-meaning",
5761 target_signal_to_string (siggnal));
5762 annotate_signal_string_end ();
5763 }
5764 ui_out_text (uiout, ".\n");
5765 }
5766
5767 /* Reverse execution: target ran out of history info; print why the inferior
5768 has stopped. */
5769
5770 static void
5771 print_no_history_reason (void)
5772 {
5773 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5774 }
5775
5776 /* Here to return control to GDB when the inferior stops for real.
5777 Print appropriate messages, remove breakpoints, give terminal our modes.
5778
5779 STOP_PRINT_FRAME nonzero means print the executing frame
5780 (pc, function, args, file, line number and line text).
5781 BREAKPOINTS_FAILED nonzero means stop was due to error
5782 attempting to insert breakpoints. */
5783
5784 void
5785 normal_stop (void)
5786 {
5787 struct target_waitstatus last;
5788 ptid_t last_ptid;
5789 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5790
5791 get_last_target_status (&last_ptid, &last);
5792
5793 /* If an exception is thrown from this point on, make sure to
5794 propagate GDB's knowledge of the executing state to the
5795 frontend/user running state. A QUIT is an easy exception to see
5796 here, so do this before any filtered output. */
5797 if (!non_stop)
5798 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5799 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5800 && last.kind != TARGET_WAITKIND_EXITED)
5801 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5802
5803 /* In non-stop mode, we don't want GDB to switch threads behind the
5804 user's back, to avoid races where the user is typing a command to
5805 apply to thread x, but GDB switches to thread y before the user
5806 finishes entering the command. */
5807
5808 /* As with the notification of thread events, we want to delay
5809 notifying the user that we've switched thread context until
5810 the inferior actually stops.
5811
5812 There's no point in saying anything if the inferior has exited.
5813 Note that SIGNALLED here means "exited with a signal", not
5814 "received a signal". */
5815 if (!non_stop
5816 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5817 && target_has_execution
5818 && last.kind != TARGET_WAITKIND_SIGNALLED
5819 && last.kind != TARGET_WAITKIND_EXITED)
5820 {
5821 target_terminal_ours_for_output ();
5822 printf_filtered (_("[Switching to %s]\n"),
5823 target_pid_to_str (inferior_ptid));
5824 annotate_thread_changed ();
5825 previous_inferior_ptid = inferior_ptid;
5826 }
5827
5828 if (!breakpoints_always_inserted_mode () && target_has_execution)
5829 {
5830 if (remove_breakpoints ())
5831 {
5832 target_terminal_ours_for_output ();
5833 printf_filtered (_("Cannot remove breakpoints because "
5834 "program is no longer writable.\nFurther "
5835 "execution is probably impossible.\n"));
5836 }
5837 }
5838
5839 /* If an auto-display called a function and that got a signal,
5840 disable that auto-display to avoid an infinite recursion. */
5841
5842 if (stopped_by_random_signal)
5843 disable_current_display ();
5844
5845 /* Don't print a message if in the middle of doing a "step n"
5846 operation for n > 1 */
5847 if (target_has_execution
5848 && last.kind != TARGET_WAITKIND_SIGNALLED
5849 && last.kind != TARGET_WAITKIND_EXITED
5850 && inferior_thread ()->step_multi
5851 && inferior_thread ()->control.stop_step)
5852 goto done;
5853
5854 target_terminal_ours ();
5855 async_enable_stdin ();
5856
5857 /* Set the current source location. This will also happen if we
5858 display the frame below, but the current SAL will be incorrect
5859 during a user hook-stop function. */
5860 if (has_stack_frames () && !stop_stack_dummy)
5861 set_current_sal_from_frame (get_current_frame (), 1);
5862
5863 /* Let the user/frontend see the threads as stopped. */
5864 do_cleanups (old_chain);
5865
5866 /* Look up the hook_stop and run it (CLI internally handles problem
5867 of stop_command's pre-hook not existing). */
5868 if (stop_command)
5869 catch_errors (hook_stop_stub, stop_command,
5870 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5871
5872 if (!has_stack_frames ())
5873 goto done;
5874
5875 if (last.kind == TARGET_WAITKIND_SIGNALLED
5876 || last.kind == TARGET_WAITKIND_EXITED)
5877 goto done;
5878
5879 /* Select innermost stack frame - i.e., current frame is frame 0,
5880 and current location is based on that.
5881 Don't do this on return from a stack dummy routine,
5882 or if the program has exited. */
5883
5884 if (!stop_stack_dummy)
5885 {
5886 select_frame (get_current_frame ());
5887
5888 /* Print current location without a level number, if
5889 we have changed functions or hit a breakpoint.
5890 Print source line if we have one.
5891 bpstat_print() contains the logic deciding in detail
5892 what to print, based on the event(s) that just occurred. */
5893
5894 /* If --batch-silent is enabled then there's no need to print the current
5895 source location, and trying to do so risks causing an error message about
5896 missing source files. */
5897 if (stop_print_frame && !batch_silent)
5898 {
5899 int bpstat_ret;
5900 int source_flag;
5901 int do_frame_printing = 1;
5902 struct thread_info *tp = inferior_thread ();
5903
5904 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5905 switch (bpstat_ret)
5906 {
5907 case PRINT_UNKNOWN:
5908 /* If we had hit a shared library event breakpoint,
5909 bpstat_print would print out this message. If we hit
5910 an OS-level shared library event, do the same
5911 thing. */
5912 if (last.kind == TARGET_WAITKIND_LOADED)
5913 {
5914 printf_filtered (_("Stopped due to shared library event\n"));
5915 source_flag = SRC_LINE; /* something bogus */
5916 do_frame_printing = 0;
5917 break;
5918 }
5919
5920 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5921 (or should) carry around the function and does (or
5922 should) use that when doing a frame comparison. */
5923 if (tp->control.stop_step
5924 && frame_id_eq (tp->control.step_frame_id,
5925 get_frame_id (get_current_frame ()))
5926 && step_start_function == find_pc_function (stop_pc))
5927 source_flag = SRC_LINE; /* Finished step, just
5928 print source line. */
5929 else
5930 source_flag = SRC_AND_LOC; /* Print location and
5931 source line. */
5932 break;
5933 case PRINT_SRC_AND_LOC:
5934 source_flag = SRC_AND_LOC; /* Print location and
5935 source line. */
5936 break;
5937 case PRINT_SRC_ONLY:
5938 source_flag = SRC_LINE;
5939 break;
5940 case PRINT_NOTHING:
5941 source_flag = SRC_LINE; /* something bogus */
5942 do_frame_printing = 0;
5943 break;
5944 default:
5945 internal_error (__FILE__, __LINE__, _("Unknown value."));
5946 }
5947
5948 /* The behavior of this routine with respect to the source
5949 flag is:
5950 SRC_LINE: Print only source line
5951 LOCATION: Print only location
5952 SRC_AND_LOC: Print location and source line. */
5953 if (do_frame_printing)
5954 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5955
5956 /* Display the auto-display expressions. */
5957 do_displays ();
5958 }
5959 }
5960
5961 /* Save the function value return registers, if we care.
5962 We might be about to restore their previous contents. */
5963 if (inferior_thread ()->control.proceed_to_finish
5964 && execution_direction != EXEC_REVERSE)
5965 {
5966 /* This should not be necessary. */
5967 if (stop_registers)
5968 regcache_xfree (stop_registers);
5969
5970 /* NB: The copy goes through to the target picking up the value of
5971 all the registers. */
5972 stop_registers = regcache_dup (get_current_regcache ());
5973 }
5974
5975 if (stop_stack_dummy == STOP_STACK_DUMMY)
5976 {
5977 /* Pop the empty frame that contains the stack dummy.
5978 This also restores inferior state prior to the call
5979 (struct infcall_suspend_state). */
5980 struct frame_info *frame = get_current_frame ();
5981
5982 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5983 frame_pop (frame);
5984 /* frame_pop() calls reinit_frame_cache as the last thing it
5985 does which means there's currently no selected frame. We
5986 don't need to re-establish a selected frame if the dummy call
5987 returns normally, that will be done by
5988 restore_infcall_control_state. However, we do have to handle
5989 the case where the dummy call is returning after being
5990 stopped (e.g. the dummy call previously hit a breakpoint).
5991 We can't know which case we have so just always re-establish
5992 a selected frame here. */
5993 select_frame (get_current_frame ());
5994 }
5995
5996 done:
5997 annotate_stopped ();
5998
5999 /* Suppress the stop observer if we're in the middle of:
6000
6001 - a step n (n > 1), as there are still more steps to be done.
6002
6003 - a "finish" command, as the observer will be called in
6004 finish_command_continuation, so it can include the inferior
6005 function's return value.
6006
6007 - calling an inferior function, as we pretend the inferior didn't
6008 run at all. The return value of the call is handled by the
6009 expression evaluator, through call_function_by_hand. */
6010
6011 if (!target_has_execution
6012 || last.kind == TARGET_WAITKIND_SIGNALLED
6013 || last.kind == TARGET_WAITKIND_EXITED
6014 || (!inferior_thread ()->step_multi
6015 && !(inferior_thread ()->control.stop_bpstat
6016 && inferior_thread ()->control.proceed_to_finish)
6017 && !inferior_thread ()->control.in_infcall))
6018 {
6019 if (!ptid_equal (inferior_ptid, null_ptid))
6020 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6021 stop_print_frame);
6022 else
6023 observer_notify_normal_stop (NULL, stop_print_frame);
6024 }
6025
6026 if (target_has_execution)
6027 {
6028 if (last.kind != TARGET_WAITKIND_SIGNALLED
6029 && last.kind != TARGET_WAITKIND_EXITED)
6030 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6031 Delete any breakpoint that is to be deleted at the next stop. */
6032 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6033 }
6034
6035 /* Try to get rid of automatically added inferiors that are no
6036 longer needed. Keeping those around slows things down linearly.
6037 Note that this never removes the current inferior. */
6038 prune_inferiors ();
6039 }
6040
6041 static int
6042 hook_stop_stub (void *cmd)
6043 {
6044 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6045 return (0);
6046 }
6047 \f
6048 int
6049 signal_stop_state (int signo)
6050 {
6051 return signal_stop[signo];
6052 }
6053
6054 int
6055 signal_print_state (int signo)
6056 {
6057 return signal_print[signo];
6058 }
6059
6060 int
6061 signal_pass_state (int signo)
6062 {
6063 return signal_program[signo];
6064 }
6065
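/* Update the SIGNAL_PASS cache for SIGNO, or for every signal when
   SIGNO is -1.  A signal may be handed straight to the inferior,
   without reporting back to GDB, only if it neither stops nor prints
   and is passed to the program.  */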
6066 static void
6067 signal_cache_update (int signo)
6068 {
6069 if (signo == -1)
6070 {
6071 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6072 signal_cache_update (signo);
6073
6074 return;
6075 }
6076
6077 signal_pass[signo] = (signal_stop[signo] == 0
6078 && signal_print[signo] == 0
6079 && signal_program[signo] == 1);
6080 }
6081
6082 int
6083 signal_stop_update (int signo, int state)
6084 {
6085 int ret = signal_stop[signo];
6086
6087 signal_stop[signo] = state;
6088 signal_cache_update (signo);
6089 return ret;
6090 }
6091
6092 int
6093 signal_print_update (int signo, int state)
6094 {
6095 int ret = signal_print[signo];
6096
6097 signal_print[signo] = state;
6098 signal_cache_update (signo);
6099 return ret;
6100 }
6101
6102 int
6103 signal_pass_update (int signo, int state)
6104 {
6105 int ret = signal_program[signo];
6106
6107 signal_program[signo] = state;
6108 signal_cache_update (signo);
6109 return ret;
6110 }
6111
6112 static void
6113 sig_print_header (void)
6114 {
6115 printf_filtered (_("Signal Stop\tPrint\tPass "
6116 "to program\tDescription\n"));
6117 }
6118
6119 static void
6120 sig_print_info (enum target_signal oursig)
6121 {
6122 const char *name = target_signal_to_name (oursig);
6123 int name_padding = 13 - strlen (name);
6124
6125 if (name_padding <= 0)
6126 name_padding = 0;
6127
6128 printf_filtered ("%s", name);
6129 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6130 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6131 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6132 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6133 printf_filtered ("%s\n", target_signal_to_string (oursig));
6134 }
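/* For reference, a row of the "info signals" / "handle" output produced
   by the two functions above typically looks something like this with
   the default tables (the description text comes from
   target_signal_to_string):

   Signal        Stop	Print	Pass to program	Description
   SIGINT        Yes	Yes	No		Interrupt  */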
6135
6136 /* Specify how various signals in the inferior should be handled. */
6137
6138 static void
6139 handle_command (char *args, int from_tty)
6140 {
6141 char **argv;
6142 int digits, wordlen;
6143 int sigfirst, signum, siglast;
6144 enum target_signal oursig;
6145 int allsigs;
6146 int nsigs;
6147 unsigned char *sigs;
6148 struct cleanup *old_chain;
6149
6150 if (args == NULL)
6151 {
6152 error_no_arg (_("signal to handle"));
6153 }
6154
6155 /* Allocate and zero an array of flags for which signals to handle. */
6156
6157 nsigs = (int) TARGET_SIGNAL_LAST;
6158 sigs = (unsigned char *) alloca (nsigs);
6159 memset (sigs, 0, nsigs);
6160
6161 /* Break the command line up into args. */
6162
6163 argv = gdb_buildargv (args);
6164 old_chain = make_cleanup_freeargv (argv);
6165
6166 /* Walk through the args, looking for signal oursigs, signal names, and
6167 actions. Signal numbers and signal names may be interspersed with
6168 actions, with the actions being performed for all signals cumulatively
6169 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
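/* For example, all of the following are accepted (signals come first,
   and the actions apply to every signal named so far):

   handle SIGUSR1 nostop noprint pass
   handle SIGALRM stop print nopass
   handle 14-15 nostop

   Numeric signals use GDB's internal numbering; see the note in the
   numeric branch below.  */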
6170
6171 while (*argv != NULL)
6172 {
6173 wordlen = strlen (*argv);
6174 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6175 {;
6176 }
6177 allsigs = 0;
6178 sigfirst = siglast = -1;
6179
6180 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6181 {
6182 /* Apply action to all signals except those used by the
6183 debugger. Silently skip those. */
6184 allsigs = 1;
6185 sigfirst = 0;
6186 siglast = nsigs - 1;
6187 }
6188 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6189 {
6190 SET_SIGS (nsigs, sigs, signal_stop);
6191 SET_SIGS (nsigs, sigs, signal_print);
6192 }
6193 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6194 {
6195 UNSET_SIGS (nsigs, sigs, signal_program);
6196 }
6197 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6198 {
6199 SET_SIGS (nsigs, sigs, signal_print);
6200 }
6201 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6202 {
6203 SET_SIGS (nsigs, sigs, signal_program);
6204 }
6205 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6206 {
6207 UNSET_SIGS (nsigs, sigs, signal_stop);
6208 }
6209 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6210 {
6211 SET_SIGS (nsigs, sigs, signal_program);
6212 }
6213 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6214 {
6215 UNSET_SIGS (nsigs, sigs, signal_print);
6216 UNSET_SIGS (nsigs, sigs, signal_stop);
6217 }
6218 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6219 {
6220 UNSET_SIGS (nsigs, sigs, signal_program);
6221 }
6222 else if (digits > 0)
6223 {
6224 /* It is numeric. The numeric signal refers to our own
6225 internal signal numbering from target.h, not to a host/target
6226 signal number. This is a feature; users really should be
6227 using symbolic names anyway, and the common ones like
6228 SIGHUP, SIGINT, SIGALRM, etc. will work right regardless. */
6229
6230 sigfirst = siglast = (int)
6231 target_signal_from_command (atoi (*argv));
6232 if ((*argv)[digits] == '-')
6233 {
6234 siglast = (int)
6235 target_signal_from_command (atoi ((*argv) + digits + 1));
6236 }
6237 if (sigfirst > siglast)
6238 {
6239 /* Bet he didn't figure we'd think of this case... */
6240 signum = sigfirst;
6241 sigfirst = siglast;
6242 siglast = signum;
6243 }
6244 }
6245 else
6246 {
6247 oursig = target_signal_from_name (*argv);
6248 if (oursig != TARGET_SIGNAL_UNKNOWN)
6249 {
6250 sigfirst = siglast = (int) oursig;
6251 }
6252 else
6253 {
6254 /* Not a number and not a recognized flag word => complain. */
6255 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6256 }
6257 }
6258
6259 /* If any signal numbers or symbol names were found, set flags for
6260 which signals to apply actions to. */
6261
6262 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6263 {
6264 switch ((enum target_signal) signum)
6265 {
6266 case TARGET_SIGNAL_TRAP:
6267 case TARGET_SIGNAL_INT:
6268 if (!allsigs && !sigs[signum])
6269 {
6270 if (query (_("%s is used by the debugger.\n\
6271 Are you sure you want to change it? "),
6272 target_signal_to_name ((enum target_signal) signum)))
6273 {
6274 sigs[signum] = 1;
6275 }
6276 else
6277 {
6278 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6279 gdb_flush (gdb_stdout);
6280 }
6281 }
6282 break;
6283 case TARGET_SIGNAL_0:
6284 case TARGET_SIGNAL_DEFAULT:
6285 case TARGET_SIGNAL_UNKNOWN:
6286 /* Make sure that "all" doesn't print these. */
6287 break;
6288 default:
6289 sigs[signum] = 1;
6290 break;
6291 }
6292 }
6293
6294 argv++;
6295 }
6296
6297 for (signum = 0; signum < nsigs; signum++)
6298 if (sigs[signum])
6299 {
6300 signal_cache_update (-1);
6301 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6302
6303 if (from_tty)
6304 {
6305 /* Show the results. */
6306 sig_print_header ();
6307 for (; signum < nsigs; signum++)
6308 if (sigs[signum])
6309 sig_print_info (signum);
6310 }
6311
6312 break;
6313 }
6314
6315 do_cleanups (old_chain);
6316 }
6317
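/* XDB-compatible front end for the handle command.  ARGS is expected to
   be "SIGNAL FLAG", where FLAG is "s" (toggle stop), "i" (toggle whether
   the signal is passed to the program), "r" (toggle print) or "Q"
   (noprint); the pair is translated into an equivalent handle_command
   call.  */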
6318 static void
6319 xdb_handle_command (char *args, int from_tty)
6320 {
6321 char **argv;
6322 struct cleanup *old_chain;
6323
6324 if (args == NULL)
6325 error_no_arg (_("xdb command"));
6326
6327 /* Break the command line up into args. */
6328
6329 argv = gdb_buildargv (args);
6330 old_chain = make_cleanup_freeargv (argv);
6331 if (argv[1] != (char *) NULL)
6332 {
6333 char *argBuf;
6334 int bufLen;
6335
6336 bufLen = strlen (argv[0]) + 20;
6337 argBuf = (char *) xmalloc (bufLen);
6338 if (argBuf)
6339 {
6340 int validFlag = 1;
6341 enum target_signal oursig;
6342
6343 oursig = target_signal_from_name (argv[0]);
6344 memset (argBuf, 0, bufLen);
6345 if (strcmp (argv[1], "Q") == 0)
6346 sprintf (argBuf, "%s %s", argv[0], "noprint");
6347 else
6348 {
6349 if (strcmp (argv[1], "s") == 0)
6350 {
6351 if (!signal_stop[oursig])
6352 sprintf (argBuf, "%s %s", argv[0], "stop");
6353 else
6354 sprintf (argBuf, "%s %s", argv[0], "nostop");
6355 }
6356 else if (strcmp (argv[1], "i") == 0)
6357 {
6358 if (!signal_program[oursig])
6359 sprintf (argBuf, "%s %s", argv[0], "pass");
6360 else
6361 sprintf (argBuf, "%s %s", argv[0], "nopass");
6362 }
6363 else if (strcmp (argv[1], "r") == 0)
6364 {
6365 if (!signal_print[oursig])
6366 sprintf (argBuf, "%s %s", argv[0], "print");
6367 else
6368 sprintf (argBuf, "%s %s", argv[0], "noprint");
6369 }
6370 else
6371 validFlag = 0;
6372 }
6373 if (validFlag)
6374 handle_command (argBuf, from_tty);
6375 else
6376 printf_filtered (_("Invalid signal handling flag.\n"));
6377 if (argBuf)
6378 xfree (argBuf);
6379 }
6380 }
6381 do_cleanups (old_chain);
6382 }
6383
6384 /* Print current contents of the tables set by the handle command.
6385 It is possible we should just be printing signals actually used
6386 by the current target (but for things to work right when switching
6387 targets, all signals should be in the signal tables). */
6388
6389 static void
6390 signals_info (char *signum_exp, int from_tty)
6391 {
6392 enum target_signal oursig;
6393
6394 sig_print_header ();
6395
6396 if (signum_exp)
6397 {
6398 /* First see if this is a symbol name. */
6399 oursig = target_signal_from_name (signum_exp);
6400 if (oursig == TARGET_SIGNAL_UNKNOWN)
6401 {
6402 /* No, try numeric. */
6403 oursig =
6404 target_signal_from_command (parse_and_eval_long (signum_exp));
6405 }
6406 sig_print_info (oursig);
6407 return;
6408 }
6409
6410 printf_filtered ("\n");
6411 /* These ugly casts brought to you by the native VAX compiler. */
6412 for (oursig = TARGET_SIGNAL_FIRST;
6413 (int) oursig < (int) TARGET_SIGNAL_LAST;
6414 oursig = (enum target_signal) ((int) oursig + 1))
6415 {
6416 QUIT;
6417
6418 if (oursig != TARGET_SIGNAL_UNKNOWN
6419 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6420 sig_print_info (oursig);
6421 }
6422
6423 printf_filtered (_("\nUse the \"handle\" command "
6424 "to change these tables.\n"));
6425 }
6426
6427 /* Check if it makes sense to read $_siginfo from the current thread
6428 at this point. If not, throw an error. */
6429
6430 static void
6431 validate_siginfo_access (void)
6432 {
6433 /* No current inferior, no siginfo. */
6434 if (ptid_equal (inferior_ptid, null_ptid))
6435 error (_("No thread selected."));
6436
6437 /* Don't try to read from a dead thread. */
6438 if (is_exited (inferior_ptid))
6439 error (_("The current thread has terminated"));
6440
6441 /* ... or from a spinning thread. */
6442 if (is_running (inferior_ptid))
6443 error (_("Selected thread is running."));
6444 }
6445
6446 /* The $_siginfo convenience variable is a bit special. We don't know
6447 for sure the type of the value until we actually have a chance to
6448 fetch the data. The type can change depending on gdbarch, so it is
6449 also dependent on which thread you have selected. We handle this by:
6450
6451 1. making $_siginfo be an internalvar that creates a new value on
6452 access.
6453
6454 2. making the value of $_siginfo be an lval_computed value. */
6455
6456 /* This function implements the lval_computed support for reading a
6457 $_siginfo value. */
6458
6459 static void
6460 siginfo_value_read (struct value *v)
6461 {
6462 LONGEST transferred;
6463
6464 validate_siginfo_access ();
6465
6466 transferred =
6467 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6468 NULL,
6469 value_contents_all_raw (v),
6470 value_offset (v),
6471 TYPE_LENGTH (value_type (v)));
6472
6473 if (transferred != TYPE_LENGTH (value_type (v)))
6474 error (_("Unable to read siginfo"));
6475 }
6476
6477 /* This function implements the lval_computed support for writing a
6478 $_siginfo value. */
6479
6480 static void
6481 siginfo_value_write (struct value *v, struct value *fromval)
6482 {
6483 LONGEST transferred;
6484
6485 validate_siginfo_access ();
6486
6487 transferred = target_write (&current_target,
6488 TARGET_OBJECT_SIGNAL_INFO,
6489 NULL,
6490 value_contents_all_raw (fromval),
6491 value_offset (v),
6492 TYPE_LENGTH (value_type (fromval)));
6493
6494 if (transferred != TYPE_LENGTH (value_type (fromval)))
6495 error (_("Unable to write siginfo"));
6496 }
6497
6498 static const struct lval_funcs siginfo_value_funcs =
6499 {
6500 siginfo_value_read,
6501 siginfo_value_write
6502 };
6503
6504 /* Return a new value with the correct type for the siginfo object of
6505 the current thread using architecture GDBARCH. Return a void value
6506 if there's no object available. */
6507
6508 static struct value *
6509 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6510 {
6511 if (target_has_stack
6512 && !ptid_equal (inferior_ptid, null_ptid)
6513 && gdbarch_get_siginfo_type_p (gdbarch))
6514 {
6515 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6516
6517 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6518 }
6519
6520 return allocate_value (builtin_type (gdbarch)->builtin_void);
6521 }
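/* From the user's side, the value built above is what "print $_siginfo"
   shows; on targets whose gdbarch describes the siginfo type (GNU/Linux,
   for instance), individual fields such as $_siginfo.si_signo can be
   inspected, and writes go through siginfo_value_write above.  */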
6522
6523 \f
6524 /* infcall_suspend_state contains state about the program itself like its
6525 registers and any signal it received when it last stopped.
6526 This state must be restored regardless of how the inferior function call
6527 ends (either successfully, or after it hits a breakpoint or signal)
6528 if the program is to properly continue where it left off. */
6529
6530 struct infcall_suspend_state
6531 {
6532 struct thread_suspend_state thread_suspend;
6533 struct inferior_suspend_state inferior_suspend;
6534
6535 /* Other fields: */
6536 CORE_ADDR stop_pc;
6537 struct regcache *registers;
6538
6539 /* Format of SIGINFO_DATA or NULL if it is not present. */
6540 struct gdbarch *siginfo_gdbarch;
6541
6542 /* The inferior format depends on SIGINFO_GDBARCH, and the buffer is
6543 TYPE_LENGTH (gdbarch_get_siginfo_type ()) bytes long. For a different
6544 gdbarch the content would be invalid. */
6545 gdb_byte *siginfo_data;
6546 };
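/* Sketch of the intended use (the infcall code does roughly this; see
   infcall.c for the real sequence):

     struct infcall_suspend_state *st = save_infcall_suspend_state ();
     struct cleanup *c = make_cleanup_restore_infcall_suspend_state (st);
     ... run the inferior function call ...
     do_cleanups (c);    restores registers, signals and siginfo

   To keep the post-call state instead, discard the cleanup and call
   discard_infcall_suspend_state (st).  */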
6547
6548 struct infcall_suspend_state *
6549 save_infcall_suspend_state (void)
6550 {
6551 struct infcall_suspend_state *inf_state;
6552 struct thread_info *tp = inferior_thread ();
6553 struct inferior *inf = current_inferior ();
6554 struct regcache *regcache = get_current_regcache ();
6555 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6556 gdb_byte *siginfo_data = NULL;
6557
6558 if (gdbarch_get_siginfo_type_p (gdbarch))
6559 {
6560 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6561 size_t len = TYPE_LENGTH (type);
6562 struct cleanup *back_to;
6563
6564 siginfo_data = xmalloc (len);
6565 back_to = make_cleanup (xfree, siginfo_data);
6566
6567 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6568 siginfo_data, 0, len) == len)
6569 discard_cleanups (back_to);
6570 else
6571 {
6572 /* Errors ignored. */
6573 do_cleanups (back_to);
6574 siginfo_data = NULL;
6575 }
6576 }
6577
6578 inf_state = XZALLOC (struct infcall_suspend_state);
6579
6580 if (siginfo_data)
6581 {
6582 inf_state->siginfo_gdbarch = gdbarch;
6583 inf_state->siginfo_data = siginfo_data;
6584 }
6585
6586 inf_state->thread_suspend = tp->suspend;
6587 inf_state->inferior_suspend = inf->suspend;
6588
6589 /* run_inferior_call will not use the signal due to its `proceed' call with
6590 TARGET_SIGNAL_0 anyway. */
6591 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6592
6593 inf_state->stop_pc = stop_pc;
6594
6595 inf_state->registers = regcache_dup (regcache);
6596
6597 return inf_state;
6598 }
6599
6600 /* Restore inferior session state to INF_STATE. */
6601
6602 void
6603 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6604 {
6605 struct thread_info *tp = inferior_thread ();
6606 struct inferior *inf = current_inferior ();
6607 struct regcache *regcache = get_current_regcache ();
6608 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6609
6610 tp->suspend = inf_state->thread_suspend;
6611 inf->suspend = inf_state->inferior_suspend;
6612
6613 stop_pc = inf_state->stop_pc;
6614
6615 if (inf_state->siginfo_gdbarch == gdbarch)
6616 {
6617 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6618 size_t len = TYPE_LENGTH (type);
6619
6620 /* Errors ignored. */
6621 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6622 inf_state->siginfo_data, 0, len);
6623 }
6624
6625 /* The inferior can be gone if the user types "print exit(0)"
6626 (and perhaps other times). */
6627 if (target_has_execution)
6628 /* NB: The register write goes through to the target. */
6629 regcache_cpy (regcache, inf_state->registers);
6630
6631 discard_infcall_suspend_state (inf_state);
6632 }
6633
6634 static void
6635 do_restore_infcall_suspend_state_cleanup (void *state)
6636 {
6637 restore_infcall_suspend_state (state);
6638 }
6639
6640 struct cleanup *
6641 make_cleanup_restore_infcall_suspend_state
6642 (struct infcall_suspend_state *inf_state)
6643 {
6644 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6645 }
6646
6647 void
6648 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6649 {
6650 regcache_xfree (inf_state->registers);
6651 xfree (inf_state->siginfo_data);
6652 xfree (inf_state);
6653 }
6654
6655 struct regcache *
6656 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6657 {
6658 return inf_state->registers;
6659 }
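
/* Illustrative sketch (not upstream code, hence the "#if 0"): the
   intended calling pattern for the suspend-state routines above.  The
   function name and the elided call in the middle are hypothetical;
   the real consumer of this interface is the inferior-call code in
   infcall.c, which is considerably more involved.  */
#if 0
static void
example_call_with_saved_suspend_state (void)
{
  struct infcall_suspend_state *caller_state;
  struct cleanup *old_chain;

  /* Snapshot the registers, stop_pc, the stop signal and (if
     available) the siginfo of the current thread.  */
  caller_state = save_infcall_suspend_state ();
  old_chain = make_cleanup_restore_infcall_suspend_state (caller_state);

  /* ... run the inferior function call here ...  */

  /* Restoring also frees CALLER_STATE, so this must run exactly once,
     however the call ended.  */
  do_cleanups (old_chain);
}
#endif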
6660
6661 /* infcall_control_state contains state regarding gdb's control of the
6662    inferior itself, such as stepping control.  It also contains session
6663    state, such as the user's currently selected frame.  */
6664
6665 struct infcall_control_state
6666 {
6667 struct thread_control_state thread_control;
6668 struct inferior_control_state inferior_control;
6669
6670 /* Other fields: */
6671 enum stop_stack_kind stop_stack_dummy;
6672 int stopped_by_random_signal;
6673 int stop_after_trap;
6674
6675   /* ID of the selected frame when the inferior function call was made.  */
6676 struct frame_id selected_frame_id;
6677 };
6678
6679 /* Save all of the information associated with the inferior<==>gdb
6680 connection. */
6681
6682 struct infcall_control_state *
6683 save_infcall_control_state (void)
6684 {
6685 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6686 struct thread_info *tp = inferior_thread ();
6687 struct inferior *inf = current_inferior ();
6688
6689 inf_status->thread_control = tp->control;
6690 inf_status->inferior_control = inf->control;
6691
6692 tp->control.step_resume_breakpoint = NULL;
6693 tp->control.exception_resume_breakpoint = NULL;
6694
6695   /* Save the original bpstat chain to INF_STATUS; replace it in TP with a
6696      copy of the chain.  If the caller's caller is walking the chain, it will
6697      be happier if we hand back the original chain when
6698      restore_infcall_control_state is called.  */
6699 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6700
6701 /* Other fields: */
6702 inf_status->stop_stack_dummy = stop_stack_dummy;
6703 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6704 inf_status->stop_after_trap = stop_after_trap;
6705
6706 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6707
6708 return inf_status;
6709 }
6710
6711 static int
6712 restore_selected_frame (void *args)
6713 {
6714 struct frame_id *fid = (struct frame_id *) args;
6715 struct frame_info *frame;
6716
6717 frame = frame_find_by_id (*fid);
6718
6719   /* If frame_find_by_id () fails, the previously selected frame has
6720      gone away and cannot be restored.  */
6721 if (frame == NULL)
6722 {
6723 warning (_("Unable to restore previously selected frame."));
6724 return 0;
6725 }
6726
6727 select_frame (frame);
6728
6729 return (1);
6730 }
6731
6732 /* Restore inferior session state to INF_STATUS. */
6733
6734 void
6735 restore_infcall_control_state (struct infcall_control_state *inf_status)
6736 {
6737 struct thread_info *tp = inferior_thread ();
6738 struct inferior *inf = current_inferior ();
6739
6740 if (tp->control.step_resume_breakpoint)
6741 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6742
6743 if (tp->control.exception_resume_breakpoint)
6744 tp->control.exception_resume_breakpoint->disposition
6745 = disp_del_at_next_stop;
6746
6747 /* Handle the bpstat_copy of the chain. */
6748 bpstat_clear (&tp->control.stop_bpstat);
6749
6750 tp->control = inf_status->thread_control;
6751 inf->control = inf_status->inferior_control;
6752
6753 /* Other fields: */
6754 stop_stack_dummy = inf_status->stop_stack_dummy;
6755 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6756 stop_after_trap = inf_status->stop_after_trap;
6757
6758 if (target_has_stack)
6759 {
6760 /* The point of catch_errors is that if the stack is clobbered,
6761 walking the stack might encounter a garbage pointer and
6762 error() trying to dereference it. */
6763 if (catch_errors
6764 (restore_selected_frame, &inf_status->selected_frame_id,
6765 "Unable to restore previously selected frame:\n",
6766 RETURN_MASK_ERROR) == 0)
6767 /* Error in restoring the selected frame. Select the innermost
6768 frame. */
6769 select_frame (get_current_frame ());
6770 }
6771
6772 xfree (inf_status);
6773 }
6774
6775 static void
6776 do_restore_infcall_control_state_cleanup (void *sts)
6777 {
6778 restore_infcall_control_state (sts);
6779 }
6780
6781 struct cleanup *
6782 make_cleanup_restore_infcall_control_state
6783 (struct infcall_control_state *inf_status)
6784 {
6785 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6786 }
6787
6788 void
6789 discard_infcall_control_state (struct infcall_control_state *inf_status)
6790 {
6791 if (inf_status->thread_control.step_resume_breakpoint)
6792 inf_status->thread_control.step_resume_breakpoint->disposition
6793 = disp_del_at_next_stop;
6794
6795 if (inf_status->thread_control.exception_resume_breakpoint)
6796 inf_status->thread_control.exception_resume_breakpoint->disposition
6797 = disp_del_at_next_stop;
6798
6799 /* See save_infcall_control_state for info on stop_bpstat. */
6800 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6801
6802 xfree (inf_status);
6803 }
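
/* Illustrative sketch (not upstream code): the control-state routines
   above are meant to bracket an inferior function call so that
   stepping state, the stop bpstat chain and the selected frame survive
   it.  On success the snapshot is discarded; on error the cleanup
   restores it.  The function name and the error flag are
   hypothetical.  */
#if 0
static void
example_call_with_saved_control_state (int call_failed)
{
  struct infcall_control_state *inf_status;
  struct cleanup *old_chain;

  inf_status = save_infcall_control_state ();
  old_chain = make_cleanup_restore_infcall_control_state (inf_status);

  /* ... run the inferior function call here ...  */

  if (call_failed)
    /* Restore stepping state, the bpstat chain and the selected
       frame; this also frees INF_STATUS.  */
    do_cleanups (old_chain);
  else
    {
      /* Keep the post-call state and dispose of the snapshot.  */
      discard_cleanups (old_chain);
      discard_infcall_control_state (inf_status);
    }
}
#endif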
6804 \f
6805 int
6806 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6807 {
6808 struct target_waitstatus last;
6809 ptid_t last_ptid;
6810
6811 get_last_target_status (&last_ptid, &last);
6812
6813 if (last.kind != TARGET_WAITKIND_FORKED)
6814 return 0;
6815
6816 if (!ptid_equal (last_ptid, pid))
6817 return 0;
6818
6819 *child_pid = last.value.related_pid;
6820 return 1;
6821 }
6822
6823 int
6824 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6825 {
6826 struct target_waitstatus last;
6827 ptid_t last_ptid;
6828
6829 get_last_target_status (&last_ptid, &last);
6830
6831 if (last.kind != TARGET_WAITKIND_VFORKED)
6832 return 0;
6833
6834 if (!ptid_equal (last_ptid, pid))
6835 return 0;
6836
6837 *child_pid = last.value.related_pid;
6838 return 1;
6839 }
6840
6841 int
6842 inferior_has_execd (ptid_t pid, char **execd_pathname)
6843 {
6844 struct target_waitstatus last;
6845 ptid_t last_ptid;
6846
6847 get_last_target_status (&last_ptid, &last);
6848
6849 if (last.kind != TARGET_WAITKIND_EXECD)
6850 return 0;
6851
6852 if (!ptid_equal (last_ptid, pid))
6853 return 0;
6854
6855 *execd_pathname = xstrdup (last.value.execd_pathname);
6856 return 1;
6857 }
6858
6859 int
6860 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6861 {
6862 struct target_waitstatus last;
6863 ptid_t last_ptid;
6864
6865 get_last_target_status (&last_ptid, &last);
6866
6867   if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6868       && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6869 return 0;
6870
6871 if (!ptid_equal (last_ptid, pid))
6872 return 0;
6873
6874 *syscall_number = last.value.syscall_number;
6875 return 1;
6876 }
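
/* Illustrative sketch (not upstream code): the inferior_has_* queries
   above simply decode the last recorded target_waitstatus.  A caller
   interested in the most recent fork might use them roughly like
   this; the printed message is hypothetical.  */
#if 0
static void
example_report_fork (void)
{
  ptid_t child_ptid;

  if (inferior_has_forked (inferior_ptid, &child_ptid))
    printf_filtered (_("last stop was a fork; child is %s\n"),
                     target_pid_to_str (child_ptid));
}
#endif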
6877
6878 int
6879 ptid_match (ptid_t ptid, ptid_t filter)
6880 {
6881 if (ptid_equal (filter, minus_one_ptid))
6882 return 1;
6883 if (ptid_is_pid (filter)
6884 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6885 return 1;
6886 else if (ptid_equal (ptid, filter))
6887 return 1;
6888
6889 return 0;
6890 }
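
/* Illustrative examples (not upstream code) of the matching rules
   implemented by ptid_match above; the pid/lwp numbers are
   arbitrary.  */
#if 0
static void
example_ptid_match (void)
{
  ptid_t thread = ptid_build (42, 1001, 0);

  ptid_match (thread, minus_one_ptid);   /* 1: wildcard matches anything.  */
  ptid_match (thread, pid_to_ptid (42)); /* 1: filter names the whole process.  */
  ptid_match (thread, thread);           /* 1: exact match.  */
  ptid_match (thread, pid_to_ptid (7));  /* 0: different process.  */
}
#endif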
6891
6892 /* restore_inferior_ptid() will be used by the cleanup machinery
6893 to restore the inferior_ptid value saved in a call to
6894 save_inferior_ptid(). */
6895
6896 static void
6897 restore_inferior_ptid (void *arg)
6898 {
6899 ptid_t *saved_ptid_ptr = arg;
6900
6901 inferior_ptid = *saved_ptid_ptr;
6902 xfree (arg);
6903 }
6904
6905 /* Save the value of inferior_ptid so that it may be restored by a
6906 later call to do_cleanups(). Returns the struct cleanup pointer
6907 needed for later doing the cleanup. */
6908
6909 struct cleanup *
6910 save_inferior_ptid (void)
6911 {
6912 ptid_t *saved_ptid_ptr;
6913
6914 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6915 *saved_ptid_ptr = inferior_ptid;
6916 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6917 }
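
/* Illustrative sketch (not upstream code): the usual pattern for
   temporarily switching inferior_ptid with the pair above.  The ptid
   being switched to is hypothetical.  */
#if 0
static void
example_switch_inferior_ptid (ptid_t temporary_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temporary_ptid;
  /* ... operate on TEMPORARY_PTID ...  */

  /* Restores the saved inferior_ptid and frees the heap copy.  */
  do_cleanups (old_chain);
}
#endif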
6918 \f
6919
6920 /* User interface for reverse debugging:
6921 Set exec-direction / show exec-direction commands
6922    (returns an error unless target_can_execute_reverse is true).  */
6923
6924 int execution_direction = EXEC_FORWARD;
6925 static const char exec_forward[] = "forward";
6926 static const char exec_reverse[] = "reverse";
6927 static const char *exec_direction = exec_forward;
6928 static const char *exec_direction_names[] = {
6929 exec_forward,
6930 exec_reverse,
6931 NULL
6932 };
6933
6934 static void
6935 set_exec_direction_func (char *args, int from_tty,
6936 struct cmd_list_element *cmd)
6937 {
6938 if (target_can_execute_reverse)
6939 {
6940 if (!strcmp (exec_direction, exec_forward))
6941 execution_direction = EXEC_FORWARD;
6942 else if (!strcmp (exec_direction, exec_reverse))
6943 execution_direction = EXEC_REVERSE;
6944 }
6945 else
6946 {
6947 exec_direction = exec_forward;
6948 error (_("Target does not support this operation."));
6949 }
6950 }
6951
6952 static void
6953 show_exec_direction_func (struct ui_file *out, int from_tty,
6954 struct cmd_list_element *cmd, const char *value)
6955 {
6956 switch (execution_direction) {
6957 case EXEC_FORWARD:
6958 fprintf_filtered (out, _("Forward.\n"));
6959 break;
6960 case EXEC_REVERSE:
6961 fprintf_filtered (out, _("Reverse.\n"));
6962 break;
6963 default:
6964 internal_error (__FILE__, __LINE__,
6965 _("bogus execution_direction value: %d"),
6966 (int) execution_direction);
6967 }
6968 }
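
/* Illustrative note (not upstream code): with a target that can
   execute in reverse (the process record target, for instance), the
   commands registered below are used roughly like this:

       (gdb) record
       ... let the program run for a while ...
       (gdb) set exec-direction reverse
       (gdb) continue
       (gdb) show exec-direction
       Reverse.

   On other targets "set exec-direction reverse" reports that the
   operation is not supported.  */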
6969
6970 /* User interface for non-stop mode. */
6971
6972 int non_stop = 0;
6973
6974 static void
6975 set_non_stop (char *args, int from_tty,
6976 struct cmd_list_element *c)
6977 {
6978 if (target_has_execution)
6979 {
6980 non_stop_1 = non_stop;
6981 error (_("Cannot change this setting while the inferior is running."));
6982 }
6983
6984 non_stop = non_stop_1;
6985 }
6986
6987 static void
6988 show_non_stop (struct ui_file *file, int from_tty,
6989 struct cmd_list_element *c, const char *value)
6990 {
6991 fprintf_filtered (file,
6992 _("Controlling the inferior in non-stop mode is %s.\n"),
6993 value);
6994 }
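
/* Illustrative note (not upstream code): because set_non_stop above
   refuses to change the setting while the inferior is running, the
   usual workflow is to enable non-stop mode before starting the
   program, e.g.:

       (gdb) set pagination off
       (gdb) set non-stop on
       (gdb) run

   after which individual threads can be stopped and resumed
   independently.  */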
6995
6996 static void
6997 show_schedule_multiple (struct ui_file *file, int from_tty,
6998 struct cmd_list_element *c, const char *value)
6999 {
7000 fprintf_filtered (file, _("Resuming the execution of threads "
7001 "of all processes is %s.\n"), value);
7002 }
7003
7004 void
7005 _initialize_infrun (void)
7006 {
7007 int i;
7008 int numsigs;
7009
7010 add_info ("signals", signals_info, _("\
7011 What debugger does when program gets various signals.\n\
7012 Specify a signal as argument to print info on that signal only."));
7013 add_info_alias ("handle", "signals", 0);
7014
7015 add_com ("handle", class_run, handle_command, _("\
7016 Specify how to handle a signal.\n\
7017 Args are signals and actions to apply to those signals.\n\
7018 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7019 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7020 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7021 The special arg \"all\" is recognized to mean all signals except those\n\
7022 used by the debugger, typically SIGTRAP and SIGINT.\n\
7023 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7024 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7025 Stop means reenter debugger if this signal happens (implies print).\n\
7026 Print means print a message if this signal happens.\n\
7027 Pass means let program see this signal; otherwise program doesn't know.\n\
7028 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7029 Pass and Stop may be combined."));
7030 if (xdb_commands)
7031 {
7032 add_com ("lz", class_info, signals_info, _("\
7033 What debugger does when program gets various signals.\n\
7034 Specify a signal as argument to print info on that signal only."));
7035 add_com ("z", class_run, xdb_handle_command, _("\
7036 Specify how to handle a signal.\n\
7037 Args are signals and actions to apply to those signals.\n\
7038 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7039 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7040 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7041 The special arg \"all\" is recognized to mean all signals except those\n\
7042 used by the debugger, typically SIGTRAP and SIGINT.\n\
7043 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7044 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7045 nopass), \"Q\" (noprint)\n\
7046 Stop means reenter debugger if this signal happens (implies print).\n\
7047 Print means print a message if this signal happens.\n\
7048 Pass means let program see this signal; otherwise program doesn't know.\n\
7049 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7050 Pass and Stop may be combined."));
7051 }
7052
7053 if (!dbx_commands)
7054 stop_command = add_cmd ("stop", class_obscure,
7055 not_just_help_class_command, _("\
7056 There is no `stop' command, but you can set a hook on `stop'.\n\
7057 This allows you to set a list of commands to be run each time execution\n\
7058 of the program stops."), &cmdlist);
7059
7060 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7061 Set inferior debugging."), _("\
7062 Show inferior debugging."), _("\
7063 When non-zero, inferior specific debugging is enabled."),
7064 NULL,
7065 show_debug_infrun,
7066 &setdebuglist, &showdebuglist);
7067
7068 add_setshow_boolean_cmd ("displaced", class_maintenance,
7069 &debug_displaced, _("\
7070 Set displaced stepping debugging."), _("\
7071 Show displaced stepping debugging."), _("\
7072 When non-zero, displaced stepping specific debugging is enabled."),
7073 NULL,
7074 show_debug_displaced,
7075 &setdebuglist, &showdebuglist);
7076
7077 add_setshow_boolean_cmd ("non-stop", no_class,
7078 &non_stop_1, _("\
7079 Set whether gdb controls the inferior in non-stop mode."), _("\
7080 Show whether gdb controls the inferior in non-stop mode."), _("\
7081 When debugging a multi-threaded program and this setting is\n\
7082 off (the default, also called all-stop mode), when one thread stops\n\
7083 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7084 all other threads in the program while you interact with the thread of\n\
7085 interest. When you continue or step a thread, you can allow the other\n\
7086 threads to run, or have them remain stopped, but while you inspect any\n\
7087 thread's state, all threads stop.\n\
7088 \n\
7089 In non-stop mode, when one thread stops, other threads can continue\n\
7090 to run freely. You'll be able to step each thread independently,\n\
7091 leave it stopped or free to run as needed."),
7092 set_non_stop,
7093 show_non_stop,
7094 &setlist,
7095 &showlist);
7096
7097 numsigs = (int) TARGET_SIGNAL_LAST;
7098 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7099 signal_print = (unsigned char *)
7100 xmalloc (sizeof (signal_print[0]) * numsigs);
7101 signal_program = (unsigned char *)
7102 xmalloc (sizeof (signal_program[0]) * numsigs);
7103 signal_pass = (unsigned char *)
7104     xmalloc (sizeof (signal_pass[0]) * numsigs);
7105 for (i = 0; i < numsigs; i++)
7106 {
7107 signal_stop[i] = 1;
7108 signal_print[i] = 1;
7109 signal_program[i] = 1;
7110 }
7111
7112   /* Signals caused by the debugger's own actions should not be
7113      given to the program afterwards.  */
7114 signal_program[TARGET_SIGNAL_TRAP] = 0;
7115 signal_program[TARGET_SIGNAL_INT] = 0;
7116
7117 /* Signals that are not errors should not normally enter the debugger. */
7118 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7119 signal_print[TARGET_SIGNAL_ALRM] = 0;
7120 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7121 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7122 signal_stop[TARGET_SIGNAL_PROF] = 0;
7123 signal_print[TARGET_SIGNAL_PROF] = 0;
7124 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7125 signal_print[TARGET_SIGNAL_CHLD] = 0;
7126 signal_stop[TARGET_SIGNAL_IO] = 0;
7127 signal_print[TARGET_SIGNAL_IO] = 0;
7128 signal_stop[TARGET_SIGNAL_POLL] = 0;
7129 signal_print[TARGET_SIGNAL_POLL] = 0;
7130 signal_stop[TARGET_SIGNAL_URG] = 0;
7131 signal_print[TARGET_SIGNAL_URG] = 0;
7132 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7133 signal_print[TARGET_SIGNAL_WINCH] = 0;
7134 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7135 signal_print[TARGET_SIGNAL_PRIO] = 0;
7136
7137 /* These signals are used internally by user-level thread
7138 implementations. (See signal(5) on Solaris.) Like the above
7139 signals, a healthy program receives and handles them as part of
7140 its normal operation. */
7141 signal_stop[TARGET_SIGNAL_LWP] = 0;
7142 signal_print[TARGET_SIGNAL_LWP] = 0;
7143 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7144 signal_print[TARGET_SIGNAL_WAITING] = 0;
7145 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7146 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7147
7148 /* Update cached state. */
7149 signal_cache_update (-1);
7150
7151 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7152 &stop_on_solib_events, _("\
7153 Set stopping for shared library events."), _("\
7154 Show stopping for shared library events."), _("\
7155 If nonzero, gdb will give control to the user when the dynamic linker\n\
7156 notifies gdb of shared library events. The most common event of interest\n\
7157 to the user would be loading/unloading of a new library."),
7158 NULL,
7159 show_stop_on_solib_events,
7160 &setlist, &showlist);
7161
7162 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7163 follow_fork_mode_kind_names,
7164 &follow_fork_mode_string, _("\
7165 Set debugger response to a program call of fork or vfork."), _("\
7166 Show debugger response to a program call of fork or vfork."), _("\
7167 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7168 parent - the original process is debugged after a fork\n\
7169 child - the new process is debugged after a fork\n\
7170 The unfollowed process will continue to run.\n\
7171 By default, the debugger will follow the parent process."),
7172 NULL,
7173 show_follow_fork_mode_string,
7174 &setlist, &showlist);
7175
7176 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7177 follow_exec_mode_names,
7178 &follow_exec_mode_string, _("\
7179 Set debugger response to a program call of exec."), _("\
7180 Show debugger response to a program call of exec."), _("\
7181 An exec call replaces the program image of a process.\n\
7182 \n\
7183 follow-exec-mode can be:\n\
7184 \n\
7185 new - the debugger creates a new inferior and rebinds the process\n\
7186 to this new inferior. The program the process was running before\n\
7187 the exec call can be restarted afterwards by restarting the original\n\
7188 inferior.\n\
7189 \n\
7190 same - the debugger keeps the process bound to the same inferior.\n\
7191 The new executable image replaces the previous executable loaded in\n\
7192 the inferior. Restarting the inferior after the exec call restarts\n\
7193 the executable the process was running after the exec call.\n\
7194 \n\
7195 By default, the debugger will use the same inferior."),
7196 NULL,
7197 show_follow_exec_mode_string,
7198 &setlist, &showlist);
7199
7200 add_setshow_enum_cmd ("scheduler-locking", class_run,
7201 scheduler_enums, &scheduler_mode, _("\
7202 Set mode for locking scheduler during execution."), _("\
7203 Show mode for locking scheduler during execution."), _("\
7204 off == no locking (threads may preempt at any time)\n\
7205 on == full locking (no thread except the current thread may run)\n\
7206 step == scheduler locked during every single-step operation.\n\
7207 In this mode, no other thread may run during a step command.\n\
7208 Other threads may run while stepping over a function call ('next')."),
7209 set_schedlock_func, /* traps on target vector */
7210 show_scheduler_mode,
7211 &setlist, &showlist);
7212
7213 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7214 Set mode for resuming threads of all processes."), _("\
7215 Show mode for resuming threads of all processes."), _("\
7216 When on, execution commands (such as 'continue' or 'next') resume all\n\
7217 threads of all processes. When off (which is the default), execution\n\
7218 commands only resume the threads of the current process. The set of\n\
7219 threads that are resumed is further refined by the scheduler-locking\n\
7220 mode (see help set scheduler-locking)."),
7221 NULL,
7222 show_schedule_multiple,
7223 &setlist, &showlist);
7224
7225 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7226 Set mode of the step operation."), _("\
7227 Show mode of the step operation."), _("\
7228 When set, doing a step over a function without debug line information\n\
7229 will stop at the first instruction of that function. Otherwise, the\n\
7230 function is skipped and the step command stops at a different source line."),
7231 NULL,
7232 show_step_stop_if_no_debug,
7233 &setlist, &showlist);
7234
7235 add_setshow_enum_cmd ("displaced-stepping", class_run,
7236 can_use_displaced_stepping_enum,
7237 &can_use_displaced_stepping, _("\
7238 Set debugger's willingness to use displaced stepping."), _("\
7239 Show debugger's willingness to use displaced stepping."), _("\
7240 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7241 supported by the target architecture. If off, gdb will not use displaced\n\
7242 stepping to step over breakpoints, even if such is supported by the target\n\
7243 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7244 if the target architecture supports it and non-stop mode is active, but will not\n\
7245 use it in all-stop mode (see help set non-stop)."),
7246 NULL,
7247 show_can_use_displaced_stepping,
7248 &setlist, &showlist);
7249
7250 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7251 &exec_direction, _("Set direction of execution.\n\
7252 Options are 'forward' or 'reverse'."),
7253 _("Show direction of execution (forward/reverse)."),
7254 _("Tells gdb whether to execute forward or backward."),
7255 set_exec_direction_func, show_exec_direction_func,
7256 &setlist, &showlist);
7257
7258 /* Set/show detach-on-fork: user-settable mode. */
7259
7260 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7261 Set whether gdb will detach the child of a fork."), _("\
7262 Show whether gdb will detach the child of a fork."), _("\
7263 Tells gdb whether to detach the child of a fork."),
7264 NULL, NULL, &setlist, &showlist);
7265
7266 /* Set/show disable address space randomization mode. */
7267
7268 add_setshow_boolean_cmd ("disable-randomization", class_support,
7269 &disable_randomization, _("\
7270 Set disabling of debuggee's virtual address space randomization."), _("\
7271 Show disabling of debuggee's virtual address space randomization."), _("\
7272 When this mode is on (which is the default), randomization of the virtual\n\
7273 address space is disabled. Standalone programs run with the randomization\n\
7274 enabled by default on some platforms."),
7275 &set_disable_randomization,
7276 &show_disable_randomization,
7277 &setlist, &showlist);
7278
7279 /* ptid initializations */
7280 inferior_ptid = null_ptid;
7281 target_last_wait_ptid = minus_one_ptid;
7282
7283 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7284 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7285 observer_attach_thread_exit (infrun_thread_thread_exit);
7286 observer_attach_inferior_exit (infrun_inferior_exit);
7287
7288 /* Explicitly create without lookup, since that tries to create a
7289 value with a void typed value, and when we get here, gdbarch
7290 isn't initialized yet. At this point, we're quite sure there
7291 isn't another convenience variable of the same name. */
7292 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7293
7294 add_setshow_boolean_cmd ("observer", no_class,
7295 &observer_mode_1, _("\
7296 Set whether gdb controls the inferior in observer mode."), _("\
7297 Show whether gdb controls the inferior in observer mode."), _("\
7298 In observer mode, GDB can get data from the inferior, but not\n\
7299 affect its execution. Registers and memory may not be changed,\n\
7300 breakpoints may not be set, and the program cannot be interrupted\n\
7301 or signalled."),
7302 set_observer_mode,
7303 show_observer_mode,
7304 &setlist,
7305 &showlist);
7306 }