/* Multi-process/thread control defs for GDB, the GNU debugger.
   Copyright (C) 1987-2025 Free Software Foundation, Inc.
   Contributed by Lynx Real-Time Systems, Inc.  Los Gatos, CA.


   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDB_GDBTHREAD_H
#define GDB_GDBTHREAD_H

struct symtab;

#include "breakpoint.h"
#include "frame.h"
#include "ui-out.h"
#include "btrace.h"
#include "target/waitstatus.h"
#include "target/target.h"
#include "cli/cli-utils.h"
#include "gdbsupport/refcounted-object.h"
#include "gdbsupport/common-gdbthread.h"
#include "gdbsupport/forward-scope-exit.h"
#include "displaced-stepping.h"
#include "gdbsupport/intrusive_list.h"
#include "thread-fsm.h"
#include "language.h"

struct inferior;
struct process_stratum_target;

/* When true, print debug messages related to GDB thread creation and
   deletion.  */

extern bool debug_threads;

/* Print a "threads" debug statement.  */

#define threads_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_threads, "threads", fmt, ##__VA_ARGS__)

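/* For instance, a caller that just created a thread might log (a
   hedged, hypothetical sketch; the message and arguments are
   illustrative only, not taken from actual callers):

     threads_debug_printf ("new thread %d (global %d)",
                           tp->per_inf_num, tp->global_num);

   The message is only emitted when DEBUG_THREADS is true, normally
   toggled with the "set debug threads" command.  */
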
/* Frontend view of the thread state.  Possible extensions: stepping,
   finishing, until(ling),...

   NOTE: Since the thread state is not a boolean, most times, you do
   not want to check it with negation.  If you really want to check if
   the thread is stopped,

   use (good):

     if (tp->state == THREAD_STOPPED)

   instead of (bad):

     if (tp->state != THREAD_RUNNING)

   The latter is also true for exited threads, most likely not what
   you want.  */
enum thread_state
{
  /* In the frontend's perspective, the thread is stopped.  */
  THREAD_STOPPED,

  /* In the frontend's perspective, the thread is running.  */
  THREAD_RUNNING,

  /* The thread is listed, but known to have exited.  We keep it
     listed (but not visible) until it's safe to delete it.  */
  THREAD_EXITED,
};

/* STEP_OVER_ALL means step over all subroutine calls.
   STEP_OVER_UNDEBUGGABLE means step over calls to undebuggable functions.
   STEP_OVER_NONE means don't step over any subroutine calls.  */

enum step_over_calls_kind
{
  STEP_OVER_NONE,
  STEP_OVER_ALL,
  STEP_OVER_UNDEBUGGABLE
};

/* Inferior thread specific part of `struct infcall_control_state'.

   Inferior process counterpart is `struct inferior_control_state'.  */

struct thread_control_state
{
  /* User/external stepping state.  */

  /* Step-resume or longjmp-resume breakpoint.  */
  struct breakpoint *step_resume_breakpoint = nullptr;

  /* Exception-resume breakpoint.  */
  struct breakpoint *exception_resume_breakpoint = nullptr;

  /* Breakpoints used for software single stepping.  Plural, because
     it may have multiple locations.  E.g., if stepping over a
     conditional branch instruction we can't decode the condition for,
     we'll need to put a breakpoint at the branch destination, and
     another at the instruction after the branch.  */
  struct breakpoint *single_step_breakpoints = nullptr;

  /* Range to single step within.

     If this is nonzero, respond to a single-step signal by continuing
     to step if the pc is in this range.

     If step_range_start and step_range_end are both 1, it means to
     step for a single instruction (FIXME: it might clean up
     wait_for_inferior in a minor way if this were changed to the
     address of the instruction and that address plus one.  But maybe
     not).  */
  CORE_ADDR step_range_start = 0;  /* Inclusive */
  CORE_ADDR step_range_end = 0;    /* Exclusive */

  /* Function the thread was in as of the last time it started
     stepping.  */
  struct symbol *step_start_function = nullptr;

  /* If GDB issues a target step request, and this is nonzero, the
     target should single-step this thread once, and then continue
     single-stepping it without GDB core involvement as long as the
     thread stops in the step range above.  If this is zero, the
     target should ignore the step range, and only issue one single
     step.  */
  int may_range_step = 0;

  /* Stack frame address as of when stepping command was issued.
     This is how we know when we step into a subroutine call, and how
     to set the frame for the breakpoint used to step out.  */
  struct frame_id step_frame_id {};

  /* Similarly, the frame ID of the underlying stack frame (skipping
     any inlined frames).  */
  struct frame_id step_stack_frame_id {};

  /* True if the thread is presently stepping over a breakpoint or
     a watchpoint, either with an inline step over or a displaced (out
     of line) step, and we're now expecting it to report a trap for
     the finished single step.  */
  int trap_expected = 0;

  /* Nonzero if the thread is being proceeded for a "finish" command
     or a similar situation when return value should be printed.  */
  int proceed_to_finish = 0;

  /* Nonzero if the thread is being proceeded for an inferior function
     call.  */
  int in_infcall = 0;

  enum step_over_calls_kind step_over_calls = STEP_OVER_NONE;

  /* Nonzero if stopped due to a step command.  */
  int stop_step = 0;

  /* Chain containing status of breakpoint(s) the thread stopped
     at.  */
  bpstat *stop_bpstat = nullptr;

  /* Whether the command that started the thread was a stepping
     command.  This is used to decide whether "set scheduler-locking
     step" behaves like "on" or "off".  */
  int stepping_command = 0;

  /* True if the thread is evaluating a BP condition.  */
  bool in_cond_eval = false;
};

/* Inferior thread specific part of `struct infcall_suspend_state'.  */

struct thread_suspend_state
{
  /* Last signal that the inferior received (why it stopped).  When
     the thread is resumed, this signal is delivered.  Note: the
     target should not check whether the signal is in pass state,
     because the signal may have been explicitly passed with the
     "signal" command, which overrides "handle nopass".  If the signal
     should be suppressed, the core will take care of clearing this
     before the target is resumed.  */
  enum gdb_signal stop_signal = GDB_SIGNAL_0;

  /* The reason the thread last stopped, if we need to track it
     (breakpoint, watchpoint, etc.).  */
  enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;

  /* The waitstatus for this thread's last event.  */
  struct target_waitstatus waitstatus;
  /* If true WAITSTATUS hasn't been handled yet.  */
  int waitstatus_pending_p = 0;

  /* Record the pc of the thread the last time it stopped.  (This is
     not the current thread's PC as that may have changed since the
     last stop, e.g., "return" command, or "p $pc = 0xf000").

     - If the thread's PC has not changed since the thread last
       stopped, then proceed skips a breakpoint at the current PC,
       otherwise we let the thread run into the breakpoint.

     - If the thread has an unprocessed event pending, as indicated by
       waitstatus_pending_p, this is used in coordination with
       stop_reason: if the thread's PC has changed since the thread
       last stopped, a pending breakpoint waitstatus is discarded.

     - If the thread is running, then this field has its value removed by
       calling stop_pc.reset() (see thread_info::set_executing()).
       Attempting to read a std::optional with no value is undefined
       behavior and will trigger an assertion error when _GLIBCXX_DEBUG is
       defined, which should make the error easier to track down.  */
  std::optional<CORE_ADDR> stop_pc;
};

/* Base class for target-specific thread data.  */
struct private_thread_info
{
  virtual ~private_thread_info () = 0;
};

/* Unique pointer wrapper for private_thread_info.  */
using private_thread_info_up = std::unique_ptr<private_thread_info>;

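/* As an illustration, a target could attach its own per-thread data
   roughly like this (a hedged sketch; the type name and member below
   are hypothetical, not from any real target; PRIV is the thread_info
   member declared further below):

     struct example_thread_info : public private_thread_info
     {
       int kernel_tid = 0;
     };

     tp->priv = std::make_unique<example_thread_info> ();
     auto *info = static_cast<example_thread_info *> (tp->priv.get ());  */
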
/* Threads are intrusively refcounted objects.  Being the
   user-selected thread is normally considered an implicit strong
   reference and is thus not accounted for in the refcount, unlike
   inferior objects.  This is necessary, because there's no "current
   thread" pointer.  Instead the current thread is inferred from the
   inferior_ptid global.  However, when GDB needs to remember the
   selected thread to later restore it, GDB bumps the thread object's
   refcount, to prevent something deleting the thread object before
   reverting back (e.g., due to a "kill" command).  If the thread
   meanwhile exits before being re-selected, then the thread object is
   left listed in the thread list, but marked with state
   THREAD_EXITED.  (See scoped_restore_current_thread and
   delete_thread).  All other thread references are considered weak
   references.  Placing a thread in the thread list is an implicit
   strong reference, and is thus not accounted for in the thread's
   refcount.

   The intrusive_list_node base links threads in a per-inferior list.
   We place it first in the inheritance order to work around
   PR gcc/113599.  */

class thread_info : public intrusive_list_node<thread_info>,
		    public refcounted_object
{
public:
  explicit thread_info (inferior *inf, ptid_t ptid);
  ~thread_info ();

  bool deletable () const;

  /* Mark this thread as running and notify observers.  */
  void set_running (bool running);

  /* "Actual process id"; in fact, this may be overloaded with
     kernel thread id, etc.  */
  ptid_t ptid;

  /* Each thread has two GDB IDs.

     a) The thread ID (Id).  This consists of the pair of:

	- the number of the thread's inferior and,

	- the thread's thread number in its inferior, aka, the
	  per-inferior thread number.  This number is unique in the
	  inferior but not unique between inferiors.

     b) The global ID (GId).  This is a single integer unique
	across all inferiors.

     E.g.:

     (gdb) info threads -gid
       Id   GId  Target Id   Frame
     * 1.1  1    Thread A    0x16a09237 in foo () at foo.c:10
       1.2  3    Thread B    0x15ebc6ed in bar () at foo.c:20
       1.3  5    Thread C    0x15ebc6ed in bar () at foo.c:20
       2.1  2    Thread A    0x16a09237 in foo () at foo.c:10
       2.2  4    Thread B    0x15ebc6ed in bar () at foo.c:20
       2.3  6    Thread C    0x15ebc6ed in bar () at foo.c:20

     Above, both inferiors 1 and 2 have threads numbered 1-3, but each
     thread has its own unique global ID.  */

  /* The thread's global GDB thread number.  This is exposed to MI,
     Python/Scheme, visible with "info threads -gid", and is also what
     the $_gthread convenience variable is bound to.  */
  int global_num;

  /* The per-inferior thread number.  This is unique in the inferior
     the thread belongs to, but not unique between inferiors.  This is
     what the $_thread convenience variable is bound to.  */
  int per_inf_num;

  /* The inferior this thread belongs to.  */
  struct inferior *inf;

  /* The user-given name of the thread.

     Returns nullptr if the thread does not have a user-given name.  */
  const char *name () const
  {
    return m_name.get ();
  }

  /* Set the user-given name of the thread.

     Pass nullptr to clear the name.  */
  void set_name (gdb::unique_xmalloc_ptr<char> name)
  {
    m_name = std::move (name);
  }

  bool executing () const
  { return m_executing; }

  /* Set the thread's 'm_executing' field from EXECUTING, and if
     EXECUTING is true, also clear the thread's stop_pc.  */
  void set_executing (bool executing);

  bool resumed () const
  { return m_resumed; }

  /* Set the thread's 'm_resumed' field from RESUMED.  The thread may also
     be added to (when RESUMED is true), or removed from (when RESUMED is
     false), the list of threads with a pending wait status.  */
  void set_resumed (bool resumed);

  /* Frontend view of the thread state.  Note that the THREAD_RUNNING/
     THREAD_STOPPED states are different from EXECUTING.  When the
     thread is stopped internally while handling an internal event,
     like a software single-step breakpoint, EXECUTING will be false,
     but STATE will still be THREAD_RUNNING.  */
  enum thread_state state = THREAD_STOPPED;

  /* State of GDB control of inferior thread execution.
     See `struct thread_control_state'.  */
  thread_control_state control;

  /* Save M_SUSPEND to SUSPEND.  */

  void save_suspend_to (thread_suspend_state &suspend) const
  {
    suspend = m_suspend;
  }

  /* Restore M_SUSPEND from SUSPEND.  */

  void restore_suspend_from (const thread_suspend_state &suspend)
  {
    m_suspend = suspend;
  }

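  /* As an illustration (a hedged sketch, not the actual infcall
     implementation), a caller saving and later restoring this state
     could look like:

       thread_suspend_state saved_suspend;
       tp->save_suspend_to (saved_suspend);
       ... run an inferior function call ...
       tp->restore_suspend_from (saved_suspend);  */
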
  /* Return this thread's stop PC.  This should only be called when it is
     known that stop_pc has a value.  If this function is being used in a
     situation where a thread may not have had a stop_pc assigned, then
     stop_pc_p () can be used to check if the stop_pc is defined.  */

  CORE_ADDR stop_pc () const
  {
    gdb_assert (m_suspend.stop_pc.has_value ());
    return *m_suspend.stop_pc;
  }

  /* Set this thread's stop PC.  */

  void set_stop_pc (CORE_ADDR stop_pc)
  {
    m_suspend.stop_pc = stop_pc;
  }

  /* Remove the stop_pc stored on this thread.  */

  void clear_stop_pc ()
  {
    m_suspend.stop_pc.reset ();
  }

  /* Return true if this thread has a cached stop pc value, otherwise
     return false.  */

  bool stop_pc_p () const
  {
    return m_suspend.stop_pc.has_value ();
  }

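  /* A minimal usage sketch (the caller shown is hypothetical): when
     the thread might be running, test stop_pc_p before reading the
     value:

       if (tp->stop_pc_p ())
	 pc = tp->stop_pc ();
       else
	 ... the thread is running and has no cached stop PC ...  */
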
  /* Return true if this thread has a pending wait status.  */

  bool has_pending_waitstatus () const
  {
    return m_suspend.waitstatus_pending_p;
  }

  /* Get this thread's pending wait status.

     May only be called if has_pending_waitstatus returns true.  */

  const target_waitstatus &pending_waitstatus () const
  {
    gdb_assert (this->has_pending_waitstatus ());

    return m_suspend.waitstatus;
  }

  /* Set this thread's pending wait status.

     May only be called if has_pending_waitstatus returns false.  */

  void set_pending_waitstatus (const target_waitstatus &ws);

  /* Clear this thread's pending wait status.

     May only be called if has_pending_waitstatus returns true.  */

  void clear_pending_waitstatus ();

  /* Return this thread's stop signal.  */

  gdb_signal stop_signal () const
  {
    return m_suspend.stop_signal;
  }

  /* Set this thread's stop signal.  */

  void set_stop_signal (gdb_signal sig)
  {
    m_suspend.stop_signal = sig;
  }

  /* Return this thread's stop reason.  */

  target_stop_reason stop_reason () const
  {
    return m_suspend.stop_reason;
  }

  /* Set this thread's stop reason.  */

  void set_stop_reason (target_stop_reason reason)
  {
    m_suspend.stop_reason = reason;
  }

  /* Get the FSM associated with the thread.  */

  struct thread_fsm *thread_fsm () const
  {
    return m_thread_fsm.get ();
  }

  /* Get the owning reference to the FSM associated with the thread.

     After a call to this method, "thread_fsm () == nullptr".  */

  std::unique_ptr<struct thread_fsm> release_thread_fsm ()
  {
    return std::move (m_thread_fsm);
  }

  /* Set the FSM associated with the thread.

     It is invalid to set the FSM if another FSM is already installed.  */

  void set_thread_fsm (std::unique_ptr<struct thread_fsm> fsm)
  {
    gdb_assert (m_thread_fsm == nullptr);
    m_thread_fsm = std::move (fsm);
  }

  /* Record the thread options last set for this thread.  */

  void set_thread_options (gdb_thread_options thread_options);

  /* Get the thread options last set for this thread.  */

  gdb_thread_options thread_options () const
  {
    return m_thread_options;
  }

  int current_line = 0;
  struct symtab *current_symtab = NULL;

  /* Internal stepping state.  */

  /* Record the pc of the thread the last time it was resumed.  (It
     can't be done on stop as the PC may change since the last stop,
     e.g., "return" command, or "p $pc = 0xf000").  This is maintained
     by proceed and keep_going, and among other things, it's used in
     adjust_pc_after_break to distinguish a hardware single-step
     SIGTRAP from a breakpoint SIGTRAP.  */
  CORE_ADDR prev_pc = 0;

  /* Did we set the thread stepping a breakpoint instruction?  This is
     used in conjunction with PREV_PC to decide whether to adjust the
     PC.  */
  int stepped_breakpoint = 0;

  /* Should we step over breakpoint next time keep_going is called?  */
  int stepping_over_breakpoint = 0;

  /* Should we step over a watchpoint next time keep_going is called?
     This is needed on targets with non-continuable, non-steppable
     watchpoints.  */
  int stepping_over_watchpoint = 0;

  /* Set to TRUE if we should finish single-stepping over a breakpoint
     after hitting the current step-resume breakpoint.  The context here
     is that GDB is to do `next' or `step' while a signal arrives.
     When stepping over a breakpoint and a signal arrives, GDB will
     attempt to skip the signal handler, so it inserts a
     step_resume_breakpoint at the signal return address, and resumes
     the inferior.  step_after_step_resume_breakpoint is set to TRUE at
     this moment so that GDB remembers there is still a breakpoint to
     step over when it gets back the SIGTRAP from the
     step_resume_breakpoint.  */
  int step_after_step_resume_breakpoint = 0;

  /* This is used to remember when a fork or vfork event was caught by
     a catchpoint, and thus the event is to be followed at the next
     resume of the thread, and not immediately.  */
  struct target_waitstatus pending_follow;

  /* True if this thread has been explicitly requested to stop.  */
  bool stop_requested = false;

  /* The initiating frame of a nexting operation, used for deciding
     which exceptions to intercept.  If it is null_frame_id no
     bp_longjmp or bp_exception but longjmp has been caught just for
     bp_longjmp_call_dummy.  */
  struct frame_id initiating_frame = null_frame_id;

  /* Private data used by the target vector implementation.  */
  private_thread_info_up priv;

  /* Branch trace information for this thread.  */
  struct btrace_thread_info btrace {};

  /* Flag which indicates that the stack temporaries should be stored while
     evaluating expressions.  */
  bool stack_temporaries_enabled = false;

  /* Values that are stored as temporaries on stack while evaluating
     expressions.  */
  std::vector<struct value *> stack_temporaries;

  /* Step-over chain.  A thread is in the step-over queue if this node is
     linked.  */
  intrusive_list_node<thread_info> step_over_list_node;

  /* Node for list of threads that are resumed and have a pending wait status.

     The list head for this is in process_stratum_target, hence all threads in
     this list belong to that process target.  */
  intrusive_list_node<thread_info> resumed_with_pending_wait_status_node;

  /* Displaced-step state for this thread.  */
  displaced_step_thread_state displaced_step_state;

private:
  /* True if this thread is resumed from infrun's perspective.
     Note that a thread can be marked both as not-executing and
     resumed at the same time.  This happens if we try to resume a
     thread that has a wait status pending.  We shouldn't let the
     thread really run until that wait status has been processed, but
     we should not process that wait status if we didn't try to let
     the thread run.  */
  bool m_resumed = false;

  /* True means the thread is executing.  Note: this is different
     from saying that there is an active target and we are stopped at
     a breakpoint, for instance.  This is a real indicator whether the
     thread is off and running.  */
  bool m_executing = false;

  /* State of inferior thread to restore after GDB is done with an inferior
     call.  See `struct thread_suspend_state'.  */
  thread_suspend_state m_suspend;

  /* The user-given name of the thread.

     Nullptr if the thread does not have a user-given name.  */
  gdb::unique_xmalloc_ptr<char> m_name;

  /* Pointer to the state machine manager object that handles what is
     left to do for the thread's execution command after the target
     stops.  Several execution commands use it.  */
  std::unique_ptr<struct thread_fsm> m_thread_fsm;

  /* The thread options as last set with a call to
     set_thread_options.  */
  gdb_thread_options m_thread_options;
};

using thread_info_resumed_with_pending_wait_status_node
  = intrusive_member_node<thread_info,
			  &thread_info::resumed_with_pending_wait_status_node>;
using thread_info_resumed_with_pending_wait_status_list
  = intrusive_list<thread_info,
		   thread_info_resumed_with_pending_wait_status_node>;

/* A gdb::ref_ptr pointer to a thread_info.  */

using thread_info_ref
  = gdb::ref_ptr<struct thread_info, refcounted_object_ref_policy>;

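/* A brief sketch (the caller shown is hypothetical): take a strong
   reference to keep a thread_info object alive across code that may
   delete threads:

     thread_info_ref ref = thread_info_ref::new_reference (tp);
     ... code that may call delete_thread; TP stays allocated, though
	 it may end up marked THREAD_EXITED ...

   The reference is released automatically when REF is destroyed.  */
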
/* A gdb::ref_ptr pointer to an inferior.  This would ideally be in
   inferior.h, but it can't due to header dependencies (inferior.h
   includes gdbthread.h).  */

using inferior_ref
  = gdb::ref_ptr<struct inferior, refcounted_object_ref_policy>;

/* Create an empty thread list, or empty the existing one.  */
extern void init_thread_list (void);

/* Add a thread to the thread list, print a message that a new thread
   is found, and return the pointer to the new thread.  Caller may use
   this pointer to initialize the private thread data.  */
extern struct thread_info *add_thread (process_stratum_target *targ,
				       ptid_t ptid);

/* Same as add_thread, but does not print a message about new
   thread.  */
extern struct thread_info *add_thread_silent (process_stratum_target *targ,
					      ptid_t ptid);

/* Same as add_thread, and sets the private info.  */
extern struct thread_info *add_thread_with_info (process_stratum_target *targ,
						 ptid_t ptid,
						 private_thread_info_up);

/* Delete thread THREAD and notify of thread exit.  If the thread is
   currently not deletable, don't actually delete it but still tag it
   as exited and do the notification.  EXIT_CODE is the thread's exit
   code.  If SILENT, don't actually notify the CLI.  THREAD must not
   be NULL or an assertion will fail.  */
extern void delete_thread_with_exit_code (thread_info *thread,
					  ULONGEST exit_code,
					  bool silent = false);

/* Delete thread THREAD and notify of thread exit.  If the thread is
   currently not deletable, don't actually delete it but still tag it
   as exited and do the notification.  THREAD must not be NULL or an
   assertion will fail.  */
extern void delete_thread (thread_info *thread);

/* Like delete_thread, but be quiet about it.  Used when the process
   this thread belonged to has already exited, for example.  */
extern void delete_thread_silent (struct thread_info *thread);

/* Mark the thread exited, but don't delete it or remove it from the
   inferior thread list.  EXIT_CODE is the thread's exit code, if
   available.  If SILENT, then don't inform the CLI about the
   exit.  */
extern void set_thread_exited (thread_info *tp,
			       std::optional<ULONGEST> exit_code = {},
			       bool silent = false);

/* Delete a step_resume_breakpoint from the thread database.  */
extern void delete_step_resume_breakpoint (struct thread_info *);

/* Delete an exception_resume_breakpoint from the thread database.  */
extern void delete_exception_resume_breakpoint (struct thread_info *);

/* Delete the single-step breakpoints of thread TP, if any.  */
extern void delete_single_step_breakpoints (struct thread_info *tp);

/* Check if the thread has software single stepping breakpoints
   set.  */
extern int thread_has_single_step_breakpoints_set (struct thread_info *tp);

/* Check whether the thread has software single stepping breakpoints
   set at PC.  */
extern int thread_has_single_step_breakpoint_here (struct thread_info *tp,
						   const address_space *aspace,
						   CORE_ADDR addr);

/* Returns whether to show inferior-qualified thread IDs, or plain
   thread numbers.  Inferior-qualified IDs are shown whenever we have
   multiple inferiors, or the only inferior left has number > 1.  */
extern int show_inferior_qualified_tids (void);

/* Return a string version of THR's thread ID.  If there are multiple
   inferiors, then this prints the inferior-qualifier form, otherwise
   it only prints the thread number.  The result is stored in a
   circular static buffer, NUMCELLS deep.  */
const char *print_thread_id (struct thread_info *thr);

/* Like print_thread_id, but always prints the inferior-qualified form,
   even when there is only a single inferior.  */
const char *print_full_thread_id (struct thread_info *thr);

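/* For example (a hedged sketch; the surrounding call and message are
   hypothetical):

     gdb_printf (_("[Switching to thread %s]\n"), print_thread_id (thr));

   Because the result lives in a static circular buffer, copy or print
   it before making many further calls.  */
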
/* Boolean test for an already-known ptid.  */
extern bool in_thread_list (process_stratum_target *targ, ptid_t ptid);

/* Boolean test for an already-known global thread id (GDB's homegrown
   global id, not the system's).  */
extern int valid_global_thread_id (int global_id);

/* Find thread by GDB global thread ID.  */
struct thread_info *find_thread_global_id (int global_id);

/* Find thread by thread library specific handle in inferior INF.  */
struct thread_info *find_thread_by_handle
  (gdb::array_view<const gdb_byte> handle, struct inferior *inf);

/* Finds the first thread of the specified inferior.  */
extern struct thread_info *first_thread_of_inferior (inferior *inf);

/* Returns any thread of inferior INF, giving preference to the
   current thread.  */
extern struct thread_info *any_thread_of_inferior (inferior *inf);

/* Returns any non-exited thread of inferior INF, giving preference to
   the current thread, and to not executing threads.  */
extern struct thread_info *any_live_thread_of_inferior (inferior *inf);

/* Change the ptid of thread OLD_PTID to NEW_PTID.  */
void thread_change_ptid (process_stratum_target *targ,
			 ptid_t old_ptid, ptid_t new_ptid);

/* Iterator function to call a user-provided callback function
   once for each known thread.  */
typedef gdb::function_view<bool (struct thread_info *)> thread_callback_func;
extern struct thread_info *iterate_over_threads (thread_callback_func);

/* Pull in the internals of the inferiors/threads ranges and
   iterators.  Must be done after struct thread_info is defined.  */
#include "thread-iter.h"

/* Return a range that can be used to walk over threads, with
   range-for.

   Used like this, it walks over all threads of all inferiors of all
   targets:

     for (thread_info *thr : all_threads ())
       { .... }

   FILTER_PTID can be used to filter out threads that don't match.
   FILTER_PTID can be:

   - minus_one_ptid, meaning walk all threads of all inferiors of
     PROC_TARGET.  If PROC_TARGET is NULL, then of all targets.

   - A process ptid, in which case walk all threads of the specified
     process.  PROC_TARGET must be non-NULL in this case.

   - A thread ptid, in which case walk that thread only.  PROC_TARGET
     must be non-NULL in this case.  */

inline all_matching_threads_range
all_threads (process_stratum_target *proc_target = nullptr,
	     ptid_t filter_ptid = minus_one_ptid)
{
  return all_matching_threads_range (proc_target, filter_ptid);
}

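/* For example (a hedged sketch; PROC_TARGET and PID stand for values
   the caller already has), walking only the threads of one process:

     for (thread_info *tp : all_threads (proc_target, ptid_t (pid)))
       { .... }

   With no arguments, every thread of every target is walked, as in
   the comment above.  */
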
/* Return a range that can be used to walk over all non-exited threads
   of all inferiors, with range-for.  Arguments are like all_threads
   above.  */

inline all_non_exited_threads_range
all_non_exited_threads (process_stratum_target *proc_target = nullptr,
			ptid_t filter_ptid = minus_one_ptid)
{
  return all_non_exited_threads_range (proc_target, filter_ptid);
}

/* Return a range that can be used to walk over all threads of all
   inferiors, with range-for, safely.  I.e., it is safe to delete the
   currently-iterated thread.  When combined with range-for, this
   allows convenient patterns like this:

     for (thread_info *t : all_threads_safe ())
       if (some_condition ())
	 delete t;  */

inline all_threads_safe_range
all_threads_safe ()
{
  return all_threads_safe_range (all_threads_iterator::begin_t {});
}

extern int thread_count (process_stratum_target *proc_target);

/* Return true if we have any thread in any inferior.  */
extern bool any_thread_p ();

/* Switch context to thread THR.  */
extern void switch_to_thread (struct thread_info *thr);

/* Switch context to no thread selected.  */
extern void switch_to_no_thread ();

/* Switch from one thread to another.  Does not read registers.  */
extern void switch_to_thread_no_regs (struct thread_info *thread);

/* Marks or clears thread(s) PTID of TARG as resumed.  If PTID is
   MINUS_ONE_PTID, applies to all threads of TARG.  If
   ptid_is_pid(PTID) is true, applies to all threads of the process
   pointed at by {TARG,PTID}.  */
extern void set_resumed (process_stratum_target *targ,
			 ptid_t ptid, bool resumed);

/* Marks thread PTID of TARG as running, or as stopped.  If PTID is
   minus_one_ptid, marks all threads of TARG.  */
extern void set_running (process_stratum_target *targ,
			 ptid_t ptid, bool running);

/* Marks or clears thread(s) PTID of TARG as having been requested to
   stop.  If PTID is MINUS_ONE_PTID, applies to all threads of TARG.
   If ptid_is_pid(PTID) is true, applies to all threads of the process
   pointed at by {TARG, PTID}.  If STOP, then the
   THREAD_STOP_REQUESTED observer is called with PTID as argument.  */
extern void set_stop_requested (process_stratum_target *targ,
				ptid_t ptid, bool stop);

/* Marks thread PTID of TARG as executing, or not.  If PTID is
   minus_one_ptid, marks all threads of TARG.

   Note that this is different from the running state.  See the
   description of state and executing fields of struct
   thread_info.  */
extern void set_executing (process_stratum_target *targ,
			   ptid_t ptid, bool executing);

/* True if any (known or unknown) thread of TARG is or may be
   executing.  */
extern bool threads_are_executing (process_stratum_target *targ);

/* Merge the executing property of thread PTID of TARG over to its
   thread state property (frontend running/stopped view).

   "not executing" -> "stopped"
   "executing"     -> "running"
   "exited"        -> "exited"

   If PTID is minus_one_ptid, go over all threads of TARG.

   Notifications are only emitted if the thread state did change.  */
extern void finish_thread_state (process_stratum_target *targ, ptid_t ptid);

/* Calls finish_thread_state on scope exit, unless release() is called
   to disengage.  */
using scoped_finish_thread_state
  = FORWARD_SCOPE_EXIT (finish_thread_state);

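/* A minimal sketch (the caller shown is hypothetical), assuming TARG
   and PTID are in scope:

     scoped_finish_thread_state finish_state (targ, ptid);
     ... work that may throw ...

   On every exit path finish_thread_state (targ, ptid) then runs,
   unless finish_state.release () was called first.  */
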
/* Commands with a prefix of `thread'.  */
extern struct cmd_list_element *thread_cmd_list;

extern void thread_command (const char *tidstr, int from_tty);

/* Print notices on thread events (attach, detach, etc.), set with
   `set print thread-events'.  */
extern bool print_thread_events;

/* Prints the list of threads and their details on UIOUT.  If
   REQUESTED_THREADS, a list of GDB ids/ranges, is not NULL, only
   print threads whose ID is included in the list.  If PID is not -1,
   only print threads from the process PID.  Otherwise, threads from
   all attached PIDs are printed.  If both REQUESTED_THREADS is not
   NULL and PID is not -1, then the thread is printed if it belongs to
   the specified process.  Otherwise, an error is raised.  */
extern void print_thread_info (struct ui_out *uiout,
			       const char *requested_threads,
			       int pid);

/* Save/restore current inferior/thread/frame.  */

class scoped_restore_current_thread
{
public:
  scoped_restore_current_thread ();
  ~scoped_restore_current_thread ();

  scoped_restore_current_thread (scoped_restore_current_thread &&rhs);

  DISABLE_COPY_AND_ASSIGN (scoped_restore_current_thread);

  /* Cancel restoring on scope exit.  */
  void dont_restore () { m_dont_restore = true; }

private:
  void restore ();

  bool m_dont_restore = false;
  thread_info_ref m_thread;
  inferior_ref m_inf;

  frame_id m_selected_frame_id;
  int m_selected_frame_level;
  bool m_was_stopped;
  /* Save/restore the language as well, because selecting a frame
     changes the current language to the frame's language if "set
     language auto".  */
  scoped_restore_current_language m_lang;
};

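/* Typical shape of a caller (a hedged sketch; OTHER_THREAD is a
   hypothetical placeholder):

     {
       scoped_restore_current_thread restore_thread;

       switch_to_thread (other_thread);
       ... operate with OTHER_THREAD as the current thread ...
     }

   On leaving the block the previously selected inferior, thread,
   frame and language are restored, unless dont_restore () was
   called.  */
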
/* Returns a pointer to the thread_info corresponding to
   INFERIOR_PTID.  INFERIOR_PTID *must* be in the thread list.  */
extern struct thread_info *inferior_thread (void);

extern void update_thread_list (void);

/* Delete any thread the target says is no longer alive.  */

extern void prune_threads (void);

/* Delete threads marked THREAD_EXITED.  Unlike prune_threads, this
   does not consult the target about whether the thread is alive right
   now.  */
extern void delete_exited_threads (void);

/* Return true if PC is in the stepping range of THREAD.  */

bool pc_in_thread_step_range (CORE_ADDR pc, struct thread_info *thread);

/* Enable storing stack temporaries for thread THR and disable and
   clear the stack temporaries on destruction.  Holds a strong
   reference to THR.  */

class enable_thread_stack_temporaries
{
public:

  explicit enable_thread_stack_temporaries (struct thread_info *thr)
    : m_thr (thread_info_ref::new_reference (thr))
  {
    m_thr->stack_temporaries_enabled = true;
    m_thr->stack_temporaries.clear ();
  }

  ~enable_thread_stack_temporaries ()
  {
    m_thr->stack_temporaries_enabled = false;
    m_thr->stack_temporaries.clear ();
  }

  DISABLE_COPY_AND_ASSIGN (enable_thread_stack_temporaries);

private:

  thread_info_ref m_thr;
};

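/* Usage sketch (the caller shown is hypothetical): keep temporaries
   alive around an expression evaluation for thread THR:

     {
       enable_thread_stack_temporaries enable_temps (thr);
       ... evaluate; values pushed with push_thread_stack_temporary
	   stay alive here ...
     }

   On leaving the block the temporaries are cleared and storing is
   disabled again.  */
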
extern bool thread_stack_temporaries_enabled_p (struct thread_info *tp);

extern void push_thread_stack_temporary (struct thread_info *tp,
					 struct value *v);

extern value *get_last_thread_stack_temporary (struct thread_info *tp);

extern bool value_in_thread_stack_temporaries (struct value *,
					       struct thread_info *thr);

/* Thread step-over list type.  */
using thread_step_over_list_node
  = intrusive_member_node<thread_info, &thread_info::step_over_list_node>;
using thread_step_over_list
  = intrusive_list<thread_info, thread_step_over_list_node>;
using thread_step_over_list_iterator
  = reference_to_pointer_iterator<thread_step_over_list::iterator>;
using thread_step_over_list_safe_iterator
  = basic_safe_iterator<thread_step_over_list_iterator>;
using thread_step_over_list_safe_range
  = iterator_range<thread_step_over_list_safe_iterator>;

static inline thread_step_over_list_safe_range
make_thread_step_over_list_safe_range (thread_step_over_list &list)
{
  return thread_step_over_list_safe_range
    (thread_step_over_list_safe_iterator (list.begin (),
					  list.end ()),
     thread_step_over_list_safe_iterator (list.end (),
					  list.end ()));
}

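/* For instance (a hedged sketch), the safe range makes it valid to
   dequeue the currently-iterated thread, e.g. when LIST is the global
   chain:

     for (thread_info *tp : make_thread_step_over_list_safe_range (list))
       if (... TP is done with its step-over ...)
	 global_thread_step_over_chain_remove (tp);  */
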
/* Add TP to the end of the global pending step-over chain.  */

extern void global_thread_step_over_chain_enqueue (thread_info *tp);

/* Append the thread step over list LIST to the global thread step over
   chain.  */

extern void global_thread_step_over_chain_enqueue_chain
  (thread_step_over_list &&list);

/* Remove TP from the global pending step-over chain.  */

extern void global_thread_step_over_chain_remove (thread_info *tp);

/* Return true if TP is in any step-over chain.  */

extern int thread_is_in_step_over_chain (struct thread_info *tp);

/* Return the length of the step-over chain L.  */

extern int thread_step_over_chain_length (const thread_step_over_list &l);

/* Cancel any ongoing execution command.  */

extern void thread_cancel_execution_command (struct thread_info *thr);

/* Check whether it makes sense to access a register of the current
   thread at this point.  If not, throw an error (e.g., the thread is
   executing).  */
extern void validate_registers_access (void);

/* Check whether it makes sense to access a register of THREAD at this point.
   Returns true if registers may be accessed; false otherwise.  */
extern bool can_access_registers_thread (struct thread_info *thread);

/* Returns whether to show which thread hit the breakpoint, received a
   signal, etc. and ended up causing a user-visible stop.  This is
   true iff we ever detected multiple threads.  */
extern int show_thread_that_caused_stop (void);

/* Print the message for a thread and/or frame selected.  */
extern void print_selected_thread_frame (struct ui_out *uiout,
					 user_selected_what selection);

/* Helper for the CLI's "thread" command and for MI's -thread-select.
   Selects thread THR.  TIDSTR is the original string the thread ID
   was parsed from.  This is used in the error message if THR is not
   alive anymore.  */
extern void thread_select (const char *tidstr, class thread_info *thr);

/* Return THREAD's name.

   If THREAD has a user-given name, return it.  Otherwise, query the thread's
   target to get the name.  May return nullptr.  */
extern const char *thread_name (thread_info *thread);

/* Switch to thread THR if it is alive.  Returns true if successfully
   switched, false otherwise.  */

extern bool switch_to_thread_if_alive (thread_info *thr);

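/* For example (a hedged usage sketch):

     if (switch_to_thread_if_alive (thr))
       ... THR is alive and is now the current thread ...
     else
       ... THR was not switched to; it is gone or has exited ...  */
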
/* Assuming that THR is the current thread, execute CMD.
   If ADA_TASK is not empty, it is the Ada task ID, and will
   be printed instead of the thread information.
   FLAGS.QUIET controls the printing of the thread information.
   FLAGS.CONT and FLAGS.SILENT control how to handle errors.  Can throw an
   exception if !FLAGS.SILENT and !FLAGS.CONT and CMD fails.  */

extern void thread_try_catch_cmd (thread_info *thr,
				  std::optional<int> ada_task,
				  const char *cmd, int from_tty,
				  const qcs_flags &flags);

/* Return a string representation of STATE.  */

extern const char *thread_state_string (enum thread_state state);

#endif /* GDB_GDBTHREAD_H */