]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/ravenscar-thread.c
459e5ea3ed3822342806a429fca9f4f41c90bca2
[thirdparty/binutils-gdb.git] / gdb / ravenscar-thread.c
1 /* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
34
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  Controlled by the
   "set ravenscar task-switching" command (see _initialize_ravenscar).  */
static bool ravenscar_task_support = true;

/* Name of the runtime symbol holding, per CPU, the ID of the thread
   currently running (see get_running_thread_id).  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Names of the runtime symbols giving access to the Ada task list.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* Name of the runtime's initialization routine; its presence is used
   to detect a Ravenscar runtime (see has_ravenscar_runtime).  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

/* Description of this target, returned by
   ravenscar_thread_target::info.  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
/* The ravenscar thread target.  An instance is allocated with "new"
   in ravenscar_inferior_created and pushed above the process stratum;
   it deletes itself when closed (see close below).  */

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, long thread) override;

  struct btrace_target_info *enable_btrace (ptid_t ptid,
					    const struct btrace_config *conf)
    override
  {
    /* Branch tracing is done on the underlying CPU thread, so
       translate the task ptid before delegating.  */
    ptid = get_base_thread_from_ravenscar_task (ptid);
    return beneath ()->enable_btrace (ptid, conf);
  }

  void mourn_inferior () override;

  /* This object owns itself; popping the target destroys it.  */
  void close () override
  {
    delete this;
  }

  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  ptid_t active_task (int cpu);
  bool task_is_currently_active (ptid_t ptid);
  bool runtime_initialized ();
  int get_thread_base_cpu (ptid_t ptid);
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  void add_thread (struct ada_task_info *task);

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<long, int> m_cpu_map;
};
160
161 /* Return true iff PTID corresponds to a ravenscar task. */
162
163 static bool
164 is_ravenscar_task (ptid_t ptid)
165 {
166 /* By construction, ravenscar tasks have their LWP set to zero.
167 Also make sure that the TID is nonzero, as some remotes, when
168 asked for the list of threads, will return the first thread
169 as having its TID set to zero. For instance, TSIM version
170 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
171 query, which the remote protocol layer then treats as a thread
172 whose TID is 0. This is obviously not a ravenscar task. */
173 return ptid.lwp () == 0 && ptid.tid () != 0;
174 }
175
/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return which CPU that ptid is running on.

   This assume that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

int
ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      /* Prefer to not read inferior memory if possible, to avoid
	 reentrancy problems with xfer_partial.  */
      auto iter = m_cpu_map.find (ptid.tid ());

      if (iter != m_cpu_map.end ())
	base_cpu = iter->second;
      else
	{
	  /* Not in our cache; fall back to the task info built by
	     ada-tasks.c (this may read inferior memory).  */
	  struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

	  gdb_assert (task_info != NULL);
	  base_cpu = task_info->base_cpu;
	}
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}
211
212 /* Given a ravenscar task (identified by its ptid_t PTID), return true
213 if this task is the currently active task on the cpu that task is
214 running on.
215
216 In other words, this function determine which CPU this task is
217 currently running on, and then return nonzero if the CPU in question
218 is executing the code for that task. If that's the case, then
219 that task's registers are in the CPU bank. Otherwise, the task
220 is currently suspended, and its registers have been saved in memory. */
221
222 bool
223 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
224 {
225 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
226
227 return ptid == active_task_ptid;
228 }
229
230 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
231 task is running.
232
233 This is the thread that corresponds to the CPU on which the task
234 is running. */
235
236 ptid_t
237 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
238 {
239 int base_cpu;
240
241 if (!is_ravenscar_task (ptid))
242 return ptid;
243
244 base_cpu = get_thread_base_cpu (ptid);
245 return ptid_t (ptid.pid (), base_cpu, 0);
246 }
247
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  /* m_base_ptid must name a CPU thread, never a task.  */
  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* Make sure we set m_base_ptid before calling active_task
     as the latter relies on it.  */
  ptid_t active_ptid = active_task (base_cpu);
  gdb_assert (active_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Record the CPU so get_thread_base_cpu can answer without
	 reading inferior memory.  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
282
283 /* The Ravenscar Runtime exports a symbol which contains the ID of
284 the thread that is currently running. Try to locate that symbol
285 and return its associated minimal symbol.
286 Return NULL if not found. */
287
288 static struct bound_minimal_symbol
289 get_running_thread_msymbol ()
290 {
291 struct bound_minimal_symbol msym;
292
293 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
294 if (!msym.minsym)
295 /* Older versions of the GNAT runtime were using a different
296 (less ideal) name for the symbol where the active thread ID
297 is stored. If we couldn't find the symbol using the latest
298 name, then try the old one. */
299 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
300
301 return msym;
302 }
303
304 /* Return True if the Ada Ravenscar run-time can be found in the
305 application. */
306
307 static bool
308 has_ravenscar_runtime ()
309 {
310 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
311 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
312 struct bound_minimal_symbol msym_known_tasks
313 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
314 struct bound_minimal_symbol msym_first_task
315 = lookup_minimal_symbol (first_task_name, NULL, NULL);
316 struct bound_minimal_symbol msym_running_thread
317 = get_running_thread_msymbol ();
318
319 return (msym_ravenscar_runtime_initializer.minsym
320 && (msym_known_tasks.minsym || msym_first_task.minsym)
321 && msym_running_thread.minsym);
322 }
323
/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.

   The check reads the running-thread ID for CPU 1; it is nonzero only
   once the runtime has been initialized (see active_task and
   get_running_thread_id).  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  return active_task (1) != null_ptid;
}
332
/* Return the ID of the thread that is currently running on CPU.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (target_gdbarch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  /* The symbol addresses a table of pointer-sized entries, one per
     CPU; CPUs are numbered starting from 1 (hence "cpu - 1").  */
  object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
  object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
		 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}
358
/* Implement the target_ops resume method.  Resuming is always done on
   the underlying CPU thread (m_base_ptid), never on a task ptid,
   since only CPU threads exist for the target beneath.  */

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
375
/* Implement the target_ops wait method.  Waits on the underlying CPU
   thread, then refreshes the task list and reports the active task's
   thread as having stopped.  */

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       int options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  /* NOTE(review): OPTIONS is not forwarded to the target beneath (0 is
     passed instead) — confirm this is intentional.  */
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind != TARGET_WAITKIND_EXITED
      && status->kind != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      return this->add_active_thread ()->ptid;
    }
  return event_ptid;
}
405
/* Add the thread associated to the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  Also
   records the task's CPU in m_cpu_map so get_thread_base_cpu can
   answer without reading inferior memory.  */

void
ravenscar_thread_target::add_thread (struct ada_task_info *task)
{
  if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
    {
      ::add_thread (current_inferior ()->process_target (), task->ptid);
      m_cpu_map[task->ptid.tid ()] = task->base_cpu;
    }
}
418
419 void
420 ravenscar_thread_target::update_thread_list ()
421 {
422 /* Do not clear the thread list before adding the Ada task, to keep
423 the thread that the process stratum has included into it
424 (m_base_ptid) and the running thread, that may not have been included
425 to system.tasking.debug's list yet. */
426
427 iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
428 {
429 this->add_thread (task);
430 });
431 }
432
433 ptid_t
434 ravenscar_thread_target::active_task (int cpu)
435 {
436 CORE_ADDR tid = get_running_thread_id (cpu);
437
438 if (tid == 0)
439 return null_ptid;
440 else
441 return ptid_t (m_base_ptid.pid (), 0, tid);
442 }
443
444 bool
445 ravenscar_thread_target::thread_alive (ptid_t ptid)
446 {
447 /* Ravenscar tasks are non-terminating. */
448 return true;
449 }
450
451 std::string
452 ravenscar_thread_target::pid_to_str (ptid_t ptid)
453 {
454 if (!is_ravenscar_task (ptid))
455 return beneath ()->pid_to_str (ptid);
456
457 return string_printf ("Ravenscar Thread %#x", (int) ptid.tid ());
458 }
459
/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  */

class temporarily_change_regcache_ptid
{
public:

  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
487
/* Implement the target_ops fetch_registers method.  For the currently
   running task, fetch from the CPU thread beneath; for a suspended
   task, read the registers saved in memory via the arch hooks.  */

void
ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task is running: its registers are in the CPU bank.
	     Temporarily point the regcache at the CPU thread so the
	     target beneath accepts the request.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->fetch_registers (regcache, regnum);
	}
      else
	{
	  /* The task is suspended: read its saved registers through
	     the architecture-specific hooks.  */
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->fetch_registers (regcache, regnum);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}
513
/* Implement the target_ops store_registers method.  Mirror image of
   fetch_registers: store into the CPU bank for the running task,
   or into the memory save area for a suspended one.  */

void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* Running task: registers live in the CPU; redirect the
	     regcache to the CPU thread for the target beneath.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->store_registers (regcache, regnum);
	}
      else
	{
	  /* Suspended task: write to the saved-register area via the
	     architecture-specific hooks.  */
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->store_registers (regcache, regnum);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}
540
/* Implement the target_ops prepare_to_store method.  Only needs to
   delegate when the task's registers actually live in the CPU bank;
   a suspended task's registers are in memory, so nothing to do.  */

void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->prepare_to_store (regcache);
	}
      else
	{
	  /* Nothing.  */
	}
    }
  else
    beneath ()->prepare_to_store (regcache);
}
562
563 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
564
565 bool
566 ravenscar_thread_target::stopped_by_sw_breakpoint ()
567 {
568 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
569 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
570 return beneath ()->stopped_by_sw_breakpoint ();
571 }
572
573 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
574
575 bool
576 ravenscar_thread_target::stopped_by_hw_breakpoint ()
577 {
578 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
579 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
580 return beneath ()->stopped_by_hw_breakpoint ();
581 }
582
583 /* Implement the to_stopped_by_watchpoint target_ops "method". */
584
585 bool
586 ravenscar_thread_target::stopped_by_watchpoint ()
587 {
588 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
589 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
590 return beneath ()->stopped_by_watchpoint ();
591 }
592
593 /* Implement the to_stopped_data_address target_ops "method". */
594
595 bool
596 ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
597 {
598 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
599 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
600 return beneath ()->stopped_data_address (addr_p);
601 }
602
/* Implement the target_ops mourn_inferior method.  Pops this target
   before delegating.  */

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  /* Save the beneath pointer first: unpush_target may end up calling
     close (), which deletes this object (see close above), so no
     member may be touched afterwards.  */
  target_ops *beneath = this->beneath ();
  unpush_target (this);
  beneath->mourn_inferior ();
}
611
612 /* Implement the to_core_of_thread target_ops "method". */
613
614 int
615 ravenscar_thread_target::core_of_thread (ptid_t ptid)
616 {
617 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
618 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
619 return beneath ()->core_of_thread (inferior_ptid);
620 }
621
/* Implement the target xfer_partial method.  Delegates to the target
   beneath with inferior_ptid pointing at the underlying CPU thread.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
641
/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (struct target_ops *target, int from_tty)
{
  const char *err_msg;

  /* Nothing to do unless the user enabled the feature, the gdbarch
     provides ravenscar hooks, and a Ravenscar runtime is present.  */
  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s.  Task/thread support disabled."), err_msg);
      return;
    }

  /* The target owns itself: it is deleted by its close method when
     popped.  */
  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  push_target (target_ops_up (rtarget));
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}
667
/* Implement the target_ops get_ada_task_ptid method: build the
   (PID, 0, TID) ptid form this module uses for Ada tasks.  The LWP
   argument is unused here.  */

ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, long thread)
{
  return ptid_t (m_base_ptid.pid (), 0, thread);
}
673
/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

/* Implement the "show ravenscar task-switching" command.  */

static void
show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
				       struct cmd_list_element *c,
				       const char *value)
{
  if (ravenscar_task_support)
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is enabled\n"));
  else
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is disabled\n"));
}
692
/* Module startup initialization function, automagically called by
   init.c.  Registers the inferior_created observer and the
   "set/show ravenscar" commands.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created);

  add_basic_prefix_cmd ("ravenscar", no_class,
			_("Prefix command for changing Ravenscar-specific settings."),
			&set_ravenscar_list, "set ravenscar ", 0, &setlist);

  add_show_prefix_cmd ("ravenscar", no_class,
		       _("Prefix command for showing Ravenscar-specific settings."),
		       &show_ravenscar_list, "show ravenscar ", 0, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}