]>
Commit | Line | Data |
---|---|---|
1 | diff --git a/gdb/infrun.c b/gdb/infrun.c | |
2 | index 91e0fc2..1d7c808 100644 | |
3 | --- a/gdb/infrun.c | |
4 | +++ b/gdb/infrun.c | |
5 | @@ -3111,6 +3111,56 @@ fill_in_stop_func (struct gdbarch *gdbarch, | |
6 | } | |
7 | } | |
8 | ||
9 | +/* Argument for at_solib_event_breakpoint_helper. */ | |
10 | + | |
11 | +struct solib_event_breakpoint_helper_arg | |
12 | +{ | |
13 | + CORE_ADDR prev_pc; | |
14 | + int shlib_bp_count; | |
15 | + int other_bp_count; | |
16 | +}; | |
17 | + | |
18 | +/* Helper for at_solib_event_breakpoint. */ | |
19 | + | |
20 | +static int | |
21 | +at_solib_event_breakpoint_helper (struct breakpoint *b, void *argp) | |
22 | +{ | |
23 | + struct solib_event_breakpoint_helper_arg *arg | |
24 | + = (struct solib_event_breakpoint_helper_arg *) argp; | |
25 | + struct bp_location *loc; | |
26 | + | |
27 | + for (loc = b->loc; loc; loc = loc->next) | |
28 | + { | |
29 | + if (loc->pspace == current_program_space | |
30 | + && (loc->address == stop_pc || loc->address == arg->prev_pc)) | |
31 | + { | |
32 | + if (b->type == bp_shlib_event) | |
33 | + arg->shlib_bp_count++; | |
34 | + else | |
35 | + { | |
36 | + arg->other_bp_count++; | |
37 | + return 1; /* quick exit */ | |
38 | + } | |
39 | + } | |
40 | + } | |
41 | + | |
42 | + return 0; /* carry on looking */ | |
43 | +} | |
44 | + | |
45 | +/* Nonzero if the location stopped at is the shlib event breakpoint. */ | |
46 | + | |
47 | +static int | |
48 | +at_solib_event_breakpoint (struct execution_control_state *ecs) | |
49 | +{ | |
50 | + struct solib_event_breakpoint_helper_arg arg; | |
51 | + arg.prev_pc = ecs->event_thread->prev_pc; | |
52 | + arg.shlib_bp_count = arg.other_bp_count = 0; | |
53 | + | |
54 | + iterate_over_breakpoints (at_solib_event_breakpoint_helper, &arg); | |
55 | + | |
56 | + return arg.shlib_bp_count && !arg.other_bp_count; | |
57 | +} | |
58 | + | |
59 | /* Given an execution control state that has been freshly filled in | |
60 | by an event from the inferior, figure out what it means and take | |
61 | appropriate action. */ | |
62 | @@ -3964,11 +4014,23 @@ handle_inferior_event (struct execution_control_state *ecs) | |
63 | ecs->random_signal = 0; | |
64 | stopped_by_random_signal = 0; | |
65 | ||
66 | - /* Hide inlined functions starting here, unless we just performed stepi or | |
67 | - nexti. After stepi and nexti, always show the innermost frame (not any | |
68 | - inline function call sites). */ | |
69 | - if (ecs->event_thread->control.step_range_end != 1) | |
70 | - skip_inline_frames (ecs->ptid); | |
71 | + /* If we have stopped at the solib event breakpoint and | |
72 | + stop_on_solib_events is not set then we can avoid calling | |
73 | + anything that calls find_pc_section. This saves a lot | |
74 | + of time when the inferior loads a lot of shared libraries, | |
75 | + because otherwise the section map gets regenerated every | |
76 | + time we stop. */ | |
77 | + if (stop_on_solib_events | |
78 | + || ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP | |
79 | + || stop_after_trap | |
80 | + || !at_solib_event_breakpoint (ecs)) | |
81 | + { | |
82 | + /* Hide inlined functions starting here, unless we just | |
83 | + performed stepi or nexti. After stepi and nexti, always show | |
84 | + the innermost frame (not any inline function call sites). */ | |
85 | + if (ecs->event_thread->control.step_range_end != 1) | |
86 | + skip_inline_frames (ecs->ptid); | |
87 | + } | |
88 | ||
89 | if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP | |
90 | && ecs->event_thread->control.trap_expected |