]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/ravenscar-thread.c
Set inferior_ptid in ravenscar_thread_target::update_thread_list
[thirdparty/binutils-gdb.git] / gdb / ravenscar-thread.c
1 /* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
34
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  Controlled by the
   "set ravenscar task-switching" command.  */
static bool ravenscar_task_support = true;

/* Name of the runtime symbol holding, per CPU, the ID of the thread
   currently running (read by get_running_thread_id).  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Names of the runtime symbols through which the GNAT runtime
   exposes its list of Ada tasks (either one may be present).  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* Name of the runtime initialization routine; its presence is one of
   the markers used by has_ravenscar_runtime.  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

/* Description of the "ravenscar" target, returned by
   ravenscar_thread_target::info.  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
/* The ravenscar target: a thread_stratum layer pushed on top of the
   process target, presenting Ada tasks as threads (see the comment
   at the top of this file for the ptid conventions).  */

struct ravenscar_thread_target final : public target_ops
{
  /* Capture the current inferior_ptid as the base CPU thread; the
     target is pushed while that thread is current (see
     ravenscar_inferior_created).  */
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, long thread) override;

  /* Branch tracing must be enabled on the underlying CPU thread, so
     translate PTID before delegating to the target beneath.  */
  struct btrace_target_info *enable_btrace (ptid_t ptid,
					    const struct btrace_config *conf)
    override
  {
    ptid = get_base_thread_from_ravenscar_task (ptid);
    return beneath ()->enable_btrace (ptid, conf);
  }

  void mourn_inferior () override;

  /* The target is heap-allocated (see ravenscar_inferior_created);
     dropping the last reference destroys it here.  */
  void close () override
  {
    delete this;
  }

  /* Ensure the runtime's currently-running thread is in the thread
     list and return it; NULL if the runtime is not initialized.  */
  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  /* Ptid of the task running on CPU, or null_ptid if unknown.  */
  ptid_t active_task (int cpu);
  /* True if PTID's task is the one executing on its CPU.  */
  bool task_is_currently_active (ptid_t ptid);
  /* True once the Ravenscar runtime has started on the target.  */
  bool runtime_initialized ();
  /* CPU number for PTID (task or CPU thread).  */
  int get_thread_base_cpu (ptid_t ptid);
  /* Map a ravenscar task ptid to its CPU thread's ptid.  */
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  /* Add TASK to the thread list if not already present.  */
  void add_thread (struct ada_task_info *task);

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<long, int> m_cpu_map;
};
160
161 /* Return true iff PTID corresponds to a ravenscar task. */
162
163 static bool
164 is_ravenscar_task (ptid_t ptid)
165 {
166 /* By construction, ravenscar tasks have their LWP set to zero.
167 Also make sure that the TID is nonzero, as some remotes, when
168 asked for the list of threads, will return the first thread
169 as having its TID set to zero. For instance, TSIM version
170 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
171 query, which the remote protocol layer then treats as a thread
172 whose TID is 0. This is obviously not a ravenscar task. */
173 return ptid.lwp () == 0 && ptid.tid () != 0;
174 }
175
176 /* Given PTID, which can be either a ravenscar task or a CPU thread,
177 return which CPU that ptid is running on.
178
179 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
180 will be triggered. */
181
182 int
183 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
184 {
185 int base_cpu;
186
187 if (is_ravenscar_task (ptid))
188 {
189 /* Prefer to not read inferior memory if possible, to avoid
190 reentrancy problems with xfer_partial. */
191 auto iter = m_cpu_map.find (ptid.tid ());
192
193 if (iter != m_cpu_map.end ())
194 base_cpu = iter->second;
195 else
196 {
197 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
198
199 gdb_assert (task_info != NULL);
200 base_cpu = task_info->base_cpu;
201 }
202 }
203 else
204 {
205 /* We assume that the LWP of the PTID is equal to the CPU number. */
206 base_cpu = ptid.lwp ();
207 }
208
209 return base_cpu;
210 }
211
212 /* Given a ravenscar task (identified by its ptid_t PTID), return true
213 if this task is the currently active task on the cpu that task is
214 running on.
215
216 In other words, this function determine which CPU this task is
217 currently running on, and then return nonzero if the CPU in question
218 is executing the code for that task. If that's the case, then
219 that task's registers are in the CPU bank. Otherwise, the task
220 is currently suspended, and its registers have been saved in memory. */
221
222 bool
223 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
224 {
225 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
226
227 return ptid == active_task_ptid;
228 }
229
230 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
231 task is running.
232
233 This is the thread that corresponds to the CPU on which the task
234 is running. */
235
236 ptid_t
237 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
238 {
239 int base_cpu;
240
241 if (!is_ravenscar_task (ptid))
242 return ptid;
243
244 base_cpu = get_thread_base_cpu (ptid);
245 return ptid_t (ptid.pid (), base_cpu, 0);
246 }
247
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.

   The caller must have set m_base_ptid to the CPU thread of interest
   before calling this: it is used directly below and, through its
   pid, by active_task.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  /* m_base_ptid is expected to name a CPU thread, never a task.  */
  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* Make sure we set m_base_ptid before calling active_task
     as the latter relies on it.  */
  ptid_t active_ptid = active_task (base_cpu);
  gdb_assert (active_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Cache the CPU so get_thread_base_cpu can answer later
	 without reading inferior memory.  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
282
283 /* The Ravenscar Runtime exports a symbol which contains the ID of
284 the thread that is currently running. Try to locate that symbol
285 and return its associated minimal symbol.
286 Return NULL if not found. */
287
288 static struct bound_minimal_symbol
289 get_running_thread_msymbol ()
290 {
291 struct bound_minimal_symbol msym;
292
293 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
294 if (!msym.minsym)
295 /* Older versions of the GNAT runtime were using a different
296 (less ideal) name for the symbol where the active thread ID
297 is stored. If we couldn't find the symbol using the latest
298 name, then try the old one. */
299 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
300
301 return msym;
302 }
303
304 /* Return True if the Ada Ravenscar run-time can be found in the
305 application. */
306
307 static bool
308 has_ravenscar_runtime ()
309 {
310 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
311 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
312 struct bound_minimal_symbol msym_known_tasks
313 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
314 struct bound_minimal_symbol msym_first_task
315 = lookup_minimal_symbol (first_task_name, NULL, NULL);
316 struct bound_minimal_symbol msym_running_thread
317 = get_running_thread_msymbol ();
318
319 return (msym_ravenscar_runtime_initializer.minsym
320 && (msym_known_tasks.minsym || msym_first_task.minsym)
321 && msym_running_thread.minsym);
322 }
323
324 /* Return True if the Ada Ravenscar run-time can be found in the
325 application, and if it has been initialized on target. */
326
327 bool
328 ravenscar_thread_target::runtime_initialized ()
329 {
330 return active_task (1) != null_ptid;
331 }
332
333 /* Return the ID of the thread that is currently running.
334 Return 0 if the ID could not be determined. */
335
336 static CORE_ADDR
337 get_running_thread_id (int cpu)
338 {
339 struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
340 int object_size;
341 int buf_size;
342 gdb_byte *buf;
343 CORE_ADDR object_addr;
344 struct type *builtin_type_void_data_ptr
345 = builtin_type (target_gdbarch ())->builtin_data_ptr;
346
347 if (!object_msym.minsym)
348 return 0;
349
350 object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
351 object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
352 + (cpu - 1) * object_size);
353 buf_size = object_size;
354 buf = (gdb_byte *) alloca (buf_size);
355 read_memory (object_addr, buf, buf_size);
356 return extract_typed_address (buf, builtin_type_void_data_ptr);
357 }
358
/* Implement the to_resume target_ops "method".

   A ravenscar task cannot be resumed on its own; resumption happens
   on the underlying CPU thread (m_base_ptid), or on all threads for
   a wildcard request.  */

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
375
/* Implement the to_wait target_ops "method".

   Wait on the target beneath (redirecting non-wildcard requests to
   the base CPU thread), then refresh the task list and report the
   active Ada task as the event thread.  */

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       int options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  /* NOTE(review): OPTIONS is not forwarded; 0 is passed to the
     target beneath.  Presumably intentional -- confirm before
     changing.  */
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind != TARGET_WAITKIND_EXITED
      && status->kind != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      /* Record the CPU thread that got the event before updating the
	 thread list; add_active_thread relies on m_base_ptid.  */
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      return this->add_active_thread ()->ptid;
    }
  return event_ptid;
}
405
406 /* Add the thread associated to the given TASK to the thread list
407 (if the thread has already been added, this is a no-op). */
408
409 void
410 ravenscar_thread_target::add_thread (struct ada_task_info *task)
411 {
412 if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
413 {
414 ::add_thread (current_inferior ()->process_target (), task->ptid);
415 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
416 }
417 }
418
/* Implement the to_update_thread_list target_ops "method".

   Add one thread per live Ada task reported by the runtime.  */

void
ravenscar_thread_target::update_thread_list ()
{
  /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
     but this isn't always the case in target methods.  So, we ensure
     it here.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
						  m_base_ptid);

  /* Do not clear the thread list before adding the Ada task, to keep
     the thread that the process stratum has included into it
     (m_base_ptid) and the running thread, that may not have been included
     to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
			       {
				 this->add_thread (task);
			       });
}
438
439 ptid_t
440 ravenscar_thread_target::active_task (int cpu)
441 {
442 CORE_ADDR tid = get_running_thread_id (cpu);
443
444 if (tid == 0)
445 return null_ptid;
446 else
447 return ptid_t (m_base_ptid.pid (), 0, tid);
448 }
449
450 bool
451 ravenscar_thread_target::thread_alive (ptid_t ptid)
452 {
453 /* Ravenscar tasks are non-terminating. */
454 return true;
455 }
456
457 std::string
458 ravenscar_thread_target::pid_to_str (ptid_t ptid)
459 {
460 if (!is_ravenscar_task (ptid))
461 return beneath ()->pid_to_str (ptid);
462
463 return string_printf ("Ravenscar Thread %#x", (int) ptid.tid ());
464 }
465
466 /* Temporarily set the ptid of a regcache to some other value. When
467 this object is destroyed, the regcache's original ptid is
468 restored. */
469
470 class temporarily_change_regcache_ptid
471 {
472 public:
473
474 temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
475 : m_regcache (regcache),
476 m_save_ptid (regcache->ptid ())
477 {
478 m_regcache->set_ptid (new_ptid);
479 }
480
481 ~temporarily_change_regcache_ptid ()
482 {
483 m_regcache->set_ptid (m_save_ptid);
484 }
485
486 private:
487
488 /* The regcache. */
489 struct regcache *m_regcache;
490 /* The saved ptid. */
491 ptid_t m_save_ptid;
492 };
493
494 void
495 ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
496 {
497 ptid_t ptid = regcache->ptid ();
498
499 if (runtime_initialized () && is_ravenscar_task (ptid))
500 {
501 if (task_is_currently_active (ptid))
502 {
503 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
504 temporarily_change_regcache_ptid changer (regcache, base);
505 beneath ()->fetch_registers (regcache, regnum);
506 }
507 else
508 {
509 struct gdbarch *gdbarch = regcache->arch ();
510 struct ravenscar_arch_ops *arch_ops
511 = gdbarch_ravenscar_ops (gdbarch);
512
513 arch_ops->fetch_registers (regcache, regnum);
514 }
515 }
516 else
517 beneath ()->fetch_registers (regcache, regnum);
518 }
519
520 void
521 ravenscar_thread_target::store_registers (struct regcache *regcache,
522 int regnum)
523 {
524 ptid_t ptid = regcache->ptid ();
525
526 if (runtime_initialized () && is_ravenscar_task (ptid))
527 {
528 if (task_is_currently_active (ptid))
529 {
530 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
531 temporarily_change_regcache_ptid changer (regcache, base);
532 beneath ()->store_registers (regcache, regnum);
533 }
534 else
535 {
536 struct gdbarch *gdbarch = regcache->arch ();
537 struct ravenscar_arch_ops *arch_ops
538 = gdbarch_ravenscar_ops (gdbarch);
539
540 arch_ops->store_registers (regcache, regnum);
541 }
542 }
543 else
544 beneath ()->store_registers (regcache, regnum);
545 }
546
547 void
548 ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
549 {
550 ptid_t ptid = regcache->ptid ();
551
552 if (runtime_initialized () && is_ravenscar_task (ptid))
553 {
554 if (task_is_currently_active (ptid))
555 {
556 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
557 temporarily_change_regcache_ptid changer (regcache, base);
558 beneath ()->prepare_to_store (regcache);
559 }
560 else
561 {
562 /* Nothing. */
563 }
564 }
565 else
566 beneath ()->prepare_to_store (regcache);
567 }
568
569 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
570
571 bool
572 ravenscar_thread_target::stopped_by_sw_breakpoint ()
573 {
574 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
575 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
576 return beneath ()->stopped_by_sw_breakpoint ();
577 }
578
579 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
580
581 bool
582 ravenscar_thread_target::stopped_by_hw_breakpoint ()
583 {
584 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
585 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
586 return beneath ()->stopped_by_hw_breakpoint ();
587 }
588
589 /* Implement the to_stopped_by_watchpoint target_ops "method". */
590
591 bool
592 ravenscar_thread_target::stopped_by_watchpoint ()
593 {
594 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
595 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
596 return beneath ()->stopped_by_watchpoint ();
597 }
598
599 /* Implement the to_stopped_data_address target_ops "method". */
600
601 bool
602 ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
603 {
604 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
605 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
606 return beneath ()->stopped_data_address (addr_p);
607 }
608
/* Implement the to_mourn_inferior target_ops "method".  */

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  /* Grab the target beneath before unpushing: unpushing can drop the
     last reference to this object and delete it (see close), so
     "this" must not be used afterwards.  */
  target_ops *beneath = this->beneath ();
  unpush_target (this);
  beneath->mourn_inferior ();
}
617
618 /* Implement the to_core_of_thread target_ops "method". */
619
620 int
621 ravenscar_thread_target::core_of_thread (ptid_t ptid)
622 {
623 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
624 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
625 return beneath ()->core_of_thread (inferior_ptid);
626 }
627
/* Implement the target xfer_partial method.

   Transfers must be performed on behalf of the underlying CPU
   thread, so temporarily point inferior_ptid there before
   delegating.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
647
/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (struct target_ops *target, int from_tty)
{
  const char *err_msg;

  /* Only engage when the user has not disabled task support, the
     architecture provides ravenscar register save/restore ops, and
     the runtime's symbols are present in the executable.  */
  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s. Task/thread support disabled."), err_msg);
      return;
    }

  /* The target is heap-allocated; push_target takes ownership of the
     reference, and close() deletes it when unpushed.  The target
     must be pushed before add_active_thread so that the latter's
     queries go through this stratum.  */
  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  push_target (target_ops_up (rtarget));
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}
673
674 ptid_t
675 ravenscar_thread_target::get_ada_task_ptid (long lwp, long thread)
676 {
677 return ptid_t (m_base_ptid.pid (), 0, thread);
678 }
679
/* Command-lists for the "set ravenscar" and "show ravenscar" prefix
   commands (registered in _initialize_ravenscar).  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;
683
684 /* Implement the "show ravenscar task-switching" command. */
685
686 static void
687 show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
688 struct cmd_list_element *c,
689 const char *value)
690 {
691 if (ravenscar_task_support)
692 fprintf_filtered (file, _("\
693 Support for Ravenscar task/thread switching is enabled\n"));
694 else
695 fprintf_filtered (file, _("\
696 Support for Ravenscar task/thread switching is disabled\n"));
697 }
698
/* Module startup initialization function, automagically called by
   init.c.  Registers the inferior_created observer and the
   "set/show ravenscar" commands.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created);

  add_basic_prefix_cmd ("ravenscar", no_class,
			_("Prefix command for changing Ravenscar-specific settings."),
			&set_ravenscar_list, "set ravenscar ", 0, &setlist);

  add_show_prefix_cmd ("ravenscar", no_class,
		       _("Prefix command for showing Ravenscar-specific settings."),
		       &show_ravenscar_list, "show ravenscar ", 0, &showlist);

  /* "set/show ravenscar task-switching" toggles ravenscar_task_support,
     which gates ravenscar_inferior_created above.  */
  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}