]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/ravenscar-thread.c
sim: ppc: use correct macros
[thirdparty/binutils-gdb.git] / gdb / ravenscar-thread.c
CommitLineData
036b1ba8
JB
1/* Ada Ravenscar thread support.
2
d01e8234 3 Copyright (C) 2004-2025 Free Software Foundation, Inc.
036b1ba8
JB
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
ec452525 20#include "extract-store-integer.h"
036b1ba8
JB
21#include "gdbcore.h"
22#include "gdbthread.h"
23#include "ada-lang.h"
24#include "target.h"
25#include "inferior.h"
26#include "command.h"
27#include "ravenscar-thread.h"
76727919 28#include "observable.h"
5b9707eb 29#include "cli/cli-cmds.h"
036b1ba8
JB
30#include "top.h"
31#include "regcache.h"
77e371c0 32#include "objfiles.h"
aeeb41fc 33#include "gdbsupport/unordered_map.h"
036b1ba8 34
9edcc12f
JB
35/* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
e397fd39 40 about high-level concepts such as threads, only about some code
9edcc12f
JB
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
e397fd39 48 on the list of Ada tasks, with one thread per Ada task. The convention
9edcc12f 49 is that threads corresponding to the CPUs (see assumption above)
e397fd39 50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
9edcc12f
JB
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
e397fd39
TT
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
9edcc12f
JB
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

/* Symbol exported by the GNAT runtime holding, per CPU, the ID of the
   currently running thread.  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Symbols used to detect the presence of the Ravenscar runtime.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* The runtime initialization routine; its presence indicates a
   Ravenscar-capable runtime.  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

/* Target description for the ravenscar thread stratum.  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
f6ac5f3d
PA
78struct ravenscar_thread_target final : public target_ops
79{
0b790b1e 80 ravenscar_thread_target ()
2da4b788 81 : m_base_ptid (inferior_ptid)
0b790b1e 82 {
0b790b1e
TT
83 }
84
d9f719f1
PA
85 const target_info &info () const override
86 { return ravenscar_target_info; }
f6ac5f3d 87
66b4deae
PA
88 strata stratum () const override { return thread_stratum; }
89
b60cea74 90 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
f6ac5f3d
PA
91 void resume (ptid_t, int, enum gdb_signal) override;
92
93 void fetch_registers (struct regcache *, int) override;
94 void store_registers (struct regcache *, int) override;
95
96 void prepare_to_store (struct regcache *) override;
97
57810aa7 98 bool stopped_by_sw_breakpoint () override;
f6ac5f3d 99
57810aa7 100 bool stopped_by_hw_breakpoint () override;
f6ac5f3d 101
57810aa7 102 bool stopped_by_watchpoint () override;
f6ac5f3d 103
57810aa7 104 bool stopped_data_address (CORE_ADDR *) override;
f6ac5f3d 105
2080266b
TT
106 enum target_xfer_status xfer_partial (enum target_object object,
107 const char *annex,
108 gdb_byte *readbuf,
109 const gdb_byte *writebuf,
110 ULONGEST offset, ULONGEST len,
111 ULONGEST *xfered_len) override;
112
57810aa7 113 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
114
115 int core_of_thread (ptid_t ptid) override;
116
117 void update_thread_list () override;
118
a068643d 119 std::string pid_to_str (ptid_t) override;
f6ac5f3d 120
c80e29db 121 ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;
f6ac5f3d 122
696c0d5e 123 struct btrace_target_info *enable_btrace (thread_info *tp,
2080266b
TT
124 const struct btrace_config *conf)
125 override
126 {
696c0d5e
MM
127 process_stratum_target *proc_target
128 = as_process_stratum_target (this->beneath ());
129 ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
9213a6d7 130 tp = proc_target->find_thread (underlying);
696c0d5e
MM
131
132 return beneath ()->enable_btrace (tp, conf);
2080266b
TT
133 }
134
f6ac5f3d 135 void mourn_inferior () override;
f6ac5f3d 136
0b790b1e
TT
137 void close () override
138 {
139 delete this;
140 }
141
3d4470e5
TT
142 thread_info *add_active_thread ();
143
0b790b1e 144private:
f6ac5f3d 145
0b790b1e
TT
146 /* PTID of the last thread that received an event.
147 This can be useful to determine the associated task that received
148 the event, to make it the current task. */
2da4b788 149 ptid_t m_base_ptid;
0b790b1e 150
0b790b1e
TT
151 ptid_t active_task (int cpu);
152 bool task_is_currently_active (ptid_t ptid);
153 bool runtime_initialized ();
a8ac85bb
TT
154 int get_thread_base_cpu (ptid_t ptid);
155 ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
2080266b 156 void add_thread (struct ada_task_info *task);
a8ac85bb 157
a52b3ae2
TT
158 /* Like switch_to_thread, but uses the base ptid for the thread. */
159 void set_base_thread_from_ravenscar_task (ptid_t ptid)
160 {
161 process_stratum_target *proc_target
162 = as_process_stratum_target (this->beneath ());
163 ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
9213a6d7 164 switch_to_thread (proc_target->find_thread (underlying));
a52b3ae2
TT
165 }
166
965b71a7
TT
167 /* Some targets use lazy FPU initialization. On these, the FP
168 registers for a given task might be uninitialized, or stored in
169 the per-task context, or simply be the live registers on the CPU.
170 This enum is used to encode this information. */
171 enum fpu_state
172 {
173 /* This target doesn't do anything special for FP registers -- if
174 any exist, they are treated just identical to non-FP
175 registers. */
176 NOTHING_SPECIAL,
177 /* This target uses the lazy FP scheme, and the FP registers are
178 taken from the CPU. This can happen for any task, because if a
179 task switch occurs, the registers aren't immediately written to
180 the per-task context -- this is deferred until the current task
181 causes an FPU trap. */
182 LIVE_FP_REGISTERS,
183 /* This target uses the lazy FP scheme, and the FP registers are
184 not available. Maybe this task never initialized the FPU, or
185 maybe GDB couldn't find the required symbol. */
186 NO_FP_REGISTERS
187 };
188
189 /* Return the FPU state. */
190 fpu_state get_fpu_state (struct regcache *regcache,
191 const ravenscar_arch_ops *arch_ops);
192
a8ac85bb
TT
193 /* This maps a TID to the CPU on which it was running. This is
194 needed because sometimes the runtime will report an active task
195 that hasn't yet been put on the list of tasks that is read by
196 ada-tasks.c. */
aeeb41fc 197 gdb::unordered_map<ULONGEST, int> m_cpu_map;
0b790b1e 198};
036b1ba8 199
989f3c58 200/* Return true iff PTID corresponds to a ravenscar task. */
9edcc12f 201
989f3c58 202static bool
9edcc12f
JB
203is_ravenscar_task (ptid_t ptid)
204{
54aa6c67
JB
205 /* By construction, ravenscar tasks have their LWP set to zero.
206 Also make sure that the TID is nonzero, as some remotes, when
207 asked for the list of threads, will return the first thread
208 as having its TID set to zero. For instance, TSIM version
209 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
210 query, which the remote protocol layer then treats as a thread
211 whose TID is 0. This is obviously not a ravenscar task. */
cc6bcb54 212 return ptid.lwp () == 0 && ptid.tid () != 0;
9edcc12f
JB
213}
214
/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return which CPU that ptid is running on.

   This assume that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

int
ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      /* Prefer to not read inferior memory if possible, to avoid
	 reentrancy problems with xfer_partial.  */
      auto iter = m_cpu_map.find (ptid.tid ());

      if (iter != m_cpu_map.end ())
	base_cpu = iter->second;
      else
	{
	  /* Fall back to the task list maintained by ada-tasks.c.  */
	  struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

	  gdb_assert (task_info != NULL);
	  base_cpu = task_info->base_cpu;
	}
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}
250
989f3c58 251/* Given a ravenscar task (identified by its ptid_t PTID), return true
9edcc12f
JB
252 if this task is the currently active task on the cpu that task is
253 running on.
254
255 In other words, this function determine which CPU this task is
256 currently running on, and then return nonzero if the CPU in question
257 is executing the code for that task. If that's the case, then
258 that task's registers are in the CPU bank. Otherwise, the task
259 is currently suspended, and its registers have been saved in memory. */
260
0b790b1e
TT
261bool
262ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
9edcc12f 263{
a8ac85bb 264 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
9edcc12f 265
d7e15655 266 return ptid == active_task_ptid;
9edcc12f
JB
267}
268
269/* Return the CPU thread (as a ptid_t) on which the given ravenscar
270 task is running.
271
272 This is the thread that corresponds to the CPU on which the task
273 is running. */
274
a8ac85bb
TT
275ptid_t
276ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
9edcc12f
JB
277{
278 int base_cpu;
279
280 if (!is_ravenscar_task (ptid))
281 return ptid;
282
a8ac85bb 283 base_cpu = get_thread_base_cpu (ptid);
184ea2f7 284 return ptid_t (ptid.pid (), base_cpu);
9edcc12f
JB
285}
286
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* It's possible for runtime_initialized to return true but for it
     not to be fully initialized.  For example, this can happen for a
     breakpoint placed at the task's beginning.  */
  ptid_t active_ptid = active_task (base_cpu);
  if (active_ptid == null_ptid)
    return nullptr;

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = proc_target->find_thread (active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Remember which CPU the task runs on, so later lookups can
	 avoid reading inferior memory.  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
323
/* The Ravenscar Runtime exports a symbol which contains the ID of
   the thread that is currently running.  Try to locate that symbol
   and return its associated minimal symbol.
   Return NULL if not found.  */

static bound_minimal_symbol
get_running_thread_msymbol ()
{
  bound_minimal_symbol msym
    = lookup_minimal_symbol (current_program_space, running_thread_name);
  if (!msym.minsym)
    /* Older versions of the GNAT runtime were using a different
       (less ideal) name for the symbol where the active thread ID
       is stored.  If we couldn't find the symbol using the latest
       name, then try the old one.  */
    msym = lookup_minimal_symbol (current_program_space, "running_thread");

  return msym;
}
343
/* Return True if the Ada Ravenscar run-time can be found in the
   application.  */

static bool
has_ravenscar_runtime ()
{
  bound_minimal_symbol msym_ravenscar_runtime_initializer
    = lookup_minimal_symbol (current_program_space,
			     ravenscar_runtime_initializer);
  bound_minimal_symbol msym_known_tasks
    = lookup_minimal_symbol (current_program_space, known_tasks_name);
  bound_minimal_symbol msym_first_task
    = lookup_minimal_symbol (current_program_space, first_task_name);
  bound_minimal_symbol msym_running_thread = get_running_thread_msymbol ();

  /* Either of the two task-list symbols is acceptable; the other
     symbols are required.  */
  return (msym_ravenscar_runtime_initializer.minsym
	  && (msym_known_tasks.minsym || msym_first_task.minsym)
	  && msym_running_thread.minsym);
}
363
364/* Return True if the Ada Ravenscar run-time can be found in the
365 application, and if it has been initialized on target. */
366
0b790b1e
TT
367bool
368ravenscar_thread_target::runtime_initialized ()
036b1ba8 369{
0b790b1e 370 return active_task (1) != null_ptid;
036b1ba8
JB
371}
372
/* Return the ID of the thread that is currently running.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (current_inferior ()->arch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  /* The exported table holds one thread pointer per CPU; CPU numbers
     start at 1, hence the (cpu - 1) indexing.  */
  object_size = builtin_type_void_data_ptr->length ();
  object_addr = (object_msym.value_address ()
		 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}
398
void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
415
ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  /* Wait on the base (CPU) thread, not the ravenscar task.  */
  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind () != TARGET_WAITKIND_EXITED
      && status->kind () != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      thread_info *thr = this->add_active_thread ();
      if (thr != nullptr)
	return thr->ptid;
    }
  return event_ptid;
}
447
/* Add the thread associated to the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  */

void
ravenscar_thread_target::add_thread (struct ada_task_info *task)
{
  if (current_inferior ()->find_thread (task->ptid) == NULL)
    {
      ::add_thread (current_inferior ()->process_target (), task->ptid);
      /* Record the task's CPU so get_thread_base_cpu can answer
	 without reading inferior memory.  */
      m_cpu_map[task->ptid.tid ()] = task->base_cpu;
    }
}
460
void
ravenscar_thread_target::update_thread_list ()
{
  /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
     but this isn't always the case in target methods.  So, we ensure
     it here.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
						  m_base_ptid);

  /* Do not clear the thread list before adding the Ada task, to keep
     the thread that the process stratum has included into it
     (m_base_ptid) and the running thread, that may not have been included
     to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks ([this] (struct ada_task_info *task)
    {
      this->add_thread (task);
    });
}
480
0b790b1e
TT
481ptid_t
482ravenscar_thread_target::active_task (int cpu)
036b1ba8 483{
9edcc12f 484 CORE_ADDR tid = get_running_thread_id (cpu);
036b1ba8
JB
485
486 if (tid == 0)
487 return null_ptid;
488 else
0b790b1e 489 return ptid_t (m_base_ptid.pid (), 0, tid);
036b1ba8
JB
490}
491
bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}
498
a068643d 499std::string
f6ac5f3d 500ravenscar_thread_target::pid_to_str (ptid_t ptid)
036b1ba8 501{
d5d833af
TT
502 if (!is_ravenscar_task (ptid))
503 return beneath ()->pid_to_str (ptid);
504
96bbe3ef 505 return string_printf ("Ravenscar Thread 0x%s",
9c1f84c9 506 phex_nz (ptid.tid ()));
036b1ba8
JB
507}
508
e73434e3
TT
509CORE_ADDR
510ravenscar_arch_ops::get_stack_base (struct regcache *regcache) const
511{
512 struct gdbarch *gdbarch = regcache->arch ();
513 const int sp_regnum = gdbarch_sp_regnum (gdbarch);
514 ULONGEST stack_address;
515 regcache_cooked_read_unsigned (regcache, sp_regnum, &stack_address);
516 return (CORE_ADDR) stack_address;
517}
518
/* Read the saved value of register REGNUM from inferior memory and
   supply it to REGCACHE.  DESCRIPTOR is the address of the task's
   saved-context area; STACK_BASE is the task's stack base, used for
   registers saved on the stack.  */

void
ravenscar_arch_ops::supply_one_register (struct regcache *regcache,
					 int regnum,
					 CORE_ADDR descriptor,
					 CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  /* Registers in the stack-saved range live relative to STACK_BASE;
     all others relative to the context descriptor.  */
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  read_memory (addr, buf, size);
  regcache->raw_supply (regnum, buf);
}
538
/* Fetch register REGNUM of a suspended task from its saved context
   into REGCACHE.  REGNUM must be a specific register (not -1).  */

void
ravenscar_arch_ops::fetch_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  struct gdbarch *gdbarch = regcache->arch ();
  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  int sp_regno = -1;
  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    {
      /* We must supply SP for get_stack_base, so recurse.  */
      sp_regno = gdbarch_sp_regnum (gdbarch);
      /* SP itself must not be stack-saved, or the recursion would
	 not terminate.  */
      gdb_assert (!(sp_regno >= first_stack_register
		    && sp_regno <= last_stack_register));
      fetch_register (regcache, sp_regno);
      stack_address = get_stack_base (regcache);
    }

  if (regnum < offsets.size () && offsets[regnum] != -1)
    supply_one_register (regcache, regnum, thread_descriptor_address,
			 stack_address);
}
566
/* Write the value of register REGNUM from REGCACHE back to the
   task's saved context in inferior memory.  DESCRIPTOR and
   STACK_BASE are as in supply_one_register.  */

void
ravenscar_arch_ops::store_one_register (struct regcache *regcache, int regnum,
					CORE_ADDR descriptor,
					CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  regcache->raw_collect (regnum, buf);
  write_memory (addr, buf, size);
}
585
/* Store register REGNUM of a suspended task from REGCACHE into the
   task's saved context.  REGNUM must be a specific register (not -1).  */

void
ravenscar_arch_ops::store_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    stack_address = get_stack_base (regcache);

  if (regnum < offsets.size () && offsets[regnum] != -1)
    store_one_register (regcache, regnum, thread_descriptor_address,
			stack_address);
}
604
/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  */

class temporarily_change_regcache_ptid
{
public:

  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
632
/* Determine how the FP registers of the task whose registers are in
   REGCACHE should be accessed, given the lazy-FPU scheme described by
   ARCH_OPS.  See the fpu_state enum for the possible answers.  */

ravenscar_thread_target::fpu_state
ravenscar_thread_target::get_fpu_state (struct regcache *regcache,
					const ravenscar_arch_ops *arch_ops)
{
  /* We want to return true if the special FP register handling is
     needed.  If this target doesn't have lazy FP, then no special
     treatment is ever needed.  */
  if (!arch_ops->on_demand_fp ())
    return NOTHING_SPECIAL;

  bound_minimal_symbol fpu_context
    = lookup_minimal_symbol (current_program_space,
			     "system__bb__cpu_primitives__current_fpu_context",
			     nullptr, nullptr);
  /* If the symbol can't be found, just fall back.  */
  if (fpu_context.minsym == nullptr)
    return NO_FP_REGISTERS;

  type *ptr_type
    = builtin_type (current_inferior ()->arch ())->builtin_data_ptr;
  ptr_type = lookup_pointer_type (ptr_type);
  value *val = value_from_pointer (ptr_type, fpu_context.value_address ());

  int cpu = get_thread_base_cpu (regcache->ptid ());
  /* The array index type has a lower bound of 1 -- it is Ada code --
     so subtract 1 here.  */
  val = value_ptradd (val, cpu - 1);

  val = value_ind (val);
  CORE_ADDR fpu_task = value_as_long (val);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();
  /* If the CPU's current FPU context belongs to this task, the live
     CPU registers hold the task's FP state.  */
  if (fpu_task == (thread_descriptor_address
		   + arch_ops->get_fpu_context_offset ()))
    return LIVE_FP_REGISTERS;

  /* Otherwise check the task's "FPU initialized" flag to decide
     whether saved FP registers exist at all.  */
  int v_init_offset = arch_ops->get_v_init_offset ();
  gdb_byte init = 0;
  read_memory (thread_descriptor_address + v_init_offset, &init, 1);
  return init ? NOTHING_SPECIAL : NO_FP_REGISTERS;
}
676
void
ravenscar_thread_target::fetch_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      /* Computed lazily, only if an FP register is actually needed.  */
      std::optional<fpu_state> fp_state;

      /* REGNUM == -1 means all registers.  */
      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  /* Per register, decide whether to read from the live CPU
	     (target beneath) or from the task's saved context.  */
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->fetch_registers (regcache, i);
	    }
	  else
	    arch_ops->fetch_register (regcache, i);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}
722
void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      /* Computed lazily, only if an FP register is actually needed.  */
      std::optional<fpu_state> fp_state;

      /* REGNUM == -1 means all registers.  */
      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  /* Per register, decide whether to write to the live CPU
	     (target beneath) or to the task's saved context.  */
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->store_registers (regcache, i);
	    }
	  else
	    arch_ops->store_register (regcache, i);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}
768
void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task's registers are live on the CPU; delegate with
	     the base (CPU) ptid.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->prepare_to_store (regcache);
	}
      else
	{
	  /* Nothing.  */
	}
    }
  else
    beneath ()->prepare_to_store (regcache);
}
790
/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  /* Ask the target beneath with the corresponding CPU thread
     selected; restore the current thread on exit.  */
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}
800
/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}
810
/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}
820
/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}
830
void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  /* Unpushing this target deletes it (see close), so grab the target
     beneath first and mourn through it afterwards.  */
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}
839
/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  /* NOTE: the query is made on inferior_ptid (the just-selected base
     thread), not on the PTID parameter.  */
  return beneath ()->core_of_thread (inferior_ptid);
}
849
/* Implement the target xfer_partial method.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
869
/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (inferior *inf)
{
  const char *err_msg;

  /* Only engage when the feature is enabled, the architecture
     provides ravenscar support, and the runtime is present.  */
  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (current_inferior ()->arch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s.  Task/thread support disabled."), err_msg);
      return;
    }

  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  inf->push_target (target_ops_up (rtarget));
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}
895
ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
{
  /* Ravenscar task ptids have LWP 0 and the Ada task ID as TID.  */
  return ptid_t (m_base_ptid.pid (), 0, thread);
}
901
036b1ba8
JB
902/* Command-list for the "set/show ravenscar" prefix command. */
903static struct cmd_list_element *set_ravenscar_list;
904static struct cmd_list_element *show_ravenscar_list;
905
036b1ba8
JB
906/* Implement the "show ravenscar task-switching" command. */
907
908static void
909show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
910 struct cmd_list_element *c,
911 const char *value)
912{
913 if (ravenscar_task_support)
6cb06a8c 914 gdb_printf (file, _("\
b64edec4 915Support for Ravenscar task/thread switching is enabled\n"));
036b1ba8 916 else
6cb06a8c 917 gdb_printf (file, _("\
b64edec4 918Support for Ravenscar task/thread switching is disabled\n"));
036b1ba8
JB
919}
920
/* Module startup initialization function, automagically called by
   init.c.  */

INIT_GDB_FILE (ravenscar)
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  add_setshow_prefix_cmd
    ("ravenscar", no_class,
     _("Prefix command for changing Ravenscar-specific settings."),
     _("Prefix command for showing Ravenscar-specific settings."),
     &set_ravenscar_list, &show_ravenscar_list,
     &setlist, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}