/* gdb/ravenscar-thread.c (from binutils-gdb).  */
1 /* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
34
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  Controlled by the
   "set ravenscar task-switching" command.  */
static bool ravenscar_task_support = true;

/* Name of the runtime symbol holding, per CPU, the ID of the thread
   currently running on that CPU (see get_running_thread_id, which
   indexes it by CPU number).  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Names of the runtime symbols anchoring the list of Ada tasks.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* Name of the runtime's initialization routine; its presence is used
   to detect a Ravenscar runtime (see has_ravenscar_runtime).  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

/* Short name and documentation strings for the ravenscar target.  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
/* The ravenscar target, pushed at the thread stratum.  It maps
   between the CPU threads reported by the target beneath and the Ada
   tasks extracted from the runtime (see the module comment above).  */

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;

  /* Forward btrace enabling to the target beneath, after translating
     a ravenscar task ptid to its underlying CPU thread.  */
  struct btrace_target_info *enable_btrace (ptid_t ptid,
					    const struct btrace_config *conf)
    override
  {
    ptid = get_base_thread_from_ravenscar_task (ptid);
    return beneath ()->enable_btrace (ptid, conf);
  }

  void mourn_inferior () override;

  /* The target owns itself; popping it destroys it.  */
  void close () override
  {
    delete this;
  }

  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  /* Return the ptid of the task active on CPU, or null_ptid if the
     runtime is not initialized there.  */
  ptid_t active_task (int cpu);
  /* True if PTID's task is the one currently executing on its CPU.  */
  bool task_is_currently_active (ptid_t ptid);
  /* True if the Ravenscar runtime has been initialized on target.  */
  bool runtime_initialized ();
  /* Return the CPU number on which PTID runs.  */
  int get_thread_base_cpu (ptid_t ptid);
  /* Translate a ravenscar task ptid to the CPU thread's ptid.  */
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  /* Add TASK to the thread list if not already present.  */
  void add_thread (struct ada_task_info *task);

  /* Like switch_to_thread, but uses the base ptid for the thread.  */
  void set_base_thread_from_ravenscar_task (ptid_t ptid)
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
    switch_to_thread (find_thread_ptid (proc_target, underlying));
  }

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<ULONGEST, int> m_cpu_map;
};
169
170 /* Return true iff PTID corresponds to a ravenscar task. */
171
172 static bool
173 is_ravenscar_task (ptid_t ptid)
174 {
175 /* By construction, ravenscar tasks have their LWP set to zero.
176 Also make sure that the TID is nonzero, as some remotes, when
177 asked for the list of threads, will return the first thread
178 as having its TID set to zero. For instance, TSIM version
179 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
180 query, which the remote protocol layer then treats as a thread
181 whose TID is 0. This is obviously not a ravenscar task. */
182 return ptid.lwp () == 0 && ptid.tid () != 0;
183 }
184
185 /* Given PTID, which can be either a ravenscar task or a CPU thread,
186 return which CPU that ptid is running on.
187
188 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
189 will be triggered. */
190
191 int
192 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
193 {
194 int base_cpu;
195
196 if (is_ravenscar_task (ptid))
197 {
198 /* Prefer to not read inferior memory if possible, to avoid
199 reentrancy problems with xfer_partial. */
200 auto iter = m_cpu_map.find (ptid.tid ());
201
202 if (iter != m_cpu_map.end ())
203 base_cpu = iter->second;
204 else
205 {
206 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
207
208 gdb_assert (task_info != NULL);
209 base_cpu = task_info->base_cpu;
210 }
211 }
212 else
213 {
214 /* We assume that the LWP of the PTID is equal to the CPU number. */
215 base_cpu = ptid.lwp ();
216 }
217
218 return base_cpu;
219 }
220
221 /* Given a ravenscar task (identified by its ptid_t PTID), return true
222 if this task is the currently active task on the cpu that task is
223 running on.
224
225 In other words, this function determine which CPU this task is
226 currently running on, and then return nonzero if the CPU in question
227 is executing the code for that task. If that's the case, then
228 that task's registers are in the CPU bank. Otherwise, the task
229 is currently suspended, and its registers have been saved in memory. */
230
231 bool
232 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
233 {
234 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
235
236 return ptid == active_task_ptid;
237 }
238
239 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
240 task is running.
241
242 This is the thread that corresponds to the CPU on which the task
243 is running. */
244
245 ptid_t
246 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
247 {
248 int base_cpu;
249
250 if (!is_ravenscar_task (ptid))
251 return ptid;
252
253 base_cpu = get_thread_base_cpu (ptid);
254 return ptid_t (ptid.pid (), base_cpu);
255 }
256
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  /* M_BASE_PTID must refer to a CPU thread, never a ravenscar task.  */
  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* NOTE(review): active_task reads m_base_ptid (for the pid), so
     m_base_ptid must be valid here -- callers such as wait set it
     before calling us.  */
  ptid_t active_ptid = active_task (base_cpu);
  /* runtime_initialized returned true, so a running thread ID must
     have been readable.  */
  gdb_assert (active_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Remember the CPU so that later lookups need not read
	 inferior memory (see m_cpu_map).  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
291
292 /* The Ravenscar Runtime exports a symbol which contains the ID of
293 the thread that is currently running. Try to locate that symbol
294 and return its associated minimal symbol.
295 Return NULL if not found. */
296
297 static struct bound_minimal_symbol
298 get_running_thread_msymbol ()
299 {
300 struct bound_minimal_symbol msym;
301
302 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
303 if (!msym.minsym)
304 /* Older versions of the GNAT runtime were using a different
305 (less ideal) name for the symbol where the active thread ID
306 is stored. If we couldn't find the symbol using the latest
307 name, then try the old one. */
308 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
309
310 return msym;
311 }
312
313 /* Return True if the Ada Ravenscar run-time can be found in the
314 application. */
315
316 static bool
317 has_ravenscar_runtime ()
318 {
319 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
320 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
321 struct bound_minimal_symbol msym_known_tasks
322 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
323 struct bound_minimal_symbol msym_first_task
324 = lookup_minimal_symbol (first_task_name, NULL, NULL);
325 struct bound_minimal_symbol msym_running_thread
326 = get_running_thread_msymbol ();
327
328 return (msym_ravenscar_runtime_initializer.minsym
329 && (msym_known_tasks.minsym || msym_first_task.minsym)
330 && msym_running_thread.minsym);
331 }
332
/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  /* Probe CPU 1: if a running-thread ID can be read for it, the
     runtime has started (active_task returns null_ptid otherwise).  */
  return active_task (1) != null_ptid;
}
341
/* Return the ID of the thread that is currently running on CPU.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (target_gdbarch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  /* The symbol is treated as an array of pointer-sized entries,
     indexed by CPU number (1-based, hence the "cpu - 1").  */
  object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
  object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
		 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}
367
/* Implement the "resume" target_ops method.  Translate ravenscar
   task ptids to the base (CPU) ptid before resuming the target
   beneath.  */

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
384
/* Implement the "wait" target_ops method.  Wait on the target
   beneath, then (if the program is still alive and the runtime is
   initialized) refresh the task list and report the active task.  */

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  /* Wait on the base (CPU) thread unless a wildcard was requested.  */
  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  /* NOTE(review): OPTIONS is not forwarded here -- a literal 0 is
     passed, dropping e.g. TARGET_WNOHANG; confirm this is
     intentional.  */
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind () != TARGET_WAITKIND_EXITED
      && status->kind () != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      return this->add_active_thread ()->ptid;
    }
  return event_ptid;
}
414
415 /* Add the thread associated to the given TASK to the thread list
416 (if the thread has already been added, this is a no-op). */
417
418 void
419 ravenscar_thread_target::add_thread (struct ada_task_info *task)
420 {
421 if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
422 {
423 ::add_thread (current_inferior ()->process_target (), task->ptid);
424 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
425 }
426 }
427
428 void
429 ravenscar_thread_target::update_thread_list ()
430 {
431 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
432 but this isn't always the case in target methods. So, we ensure
433 it here. */
434 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
435 m_base_ptid);
436
437 /* Do not clear the thread list before adding the Ada task, to keep
438 the thread that the process stratum has included into it
439 (m_base_ptid) and the running thread, that may not have been included
440 to system.tasking.debug's list yet. */
441
442 iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
443 {
444 this->add_thread (task);
445 });
446 }
447
448 ptid_t
449 ravenscar_thread_target::active_task (int cpu)
450 {
451 CORE_ADDR tid = get_running_thread_id (cpu);
452
453 if (tid == 0)
454 return null_ptid;
455 else
456 return ptid_t (m_base_ptid.pid (), 0, tid);
457 }
458
459 bool
460 ravenscar_thread_target::thread_alive (ptid_t ptid)
461 {
462 /* Ravenscar tasks are non-terminating. */
463 return true;
464 }
465
466 std::string
467 ravenscar_thread_target::pid_to_str (ptid_t ptid)
468 {
469 if (!is_ravenscar_task (ptid))
470 return beneath ()->pid_to_str (ptid);
471
472 return string_printf ("Ravenscar Thread 0x%s",
473 phex_nz (ptid.tid (), sizeof (ULONGEST)));
474 }
475
/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  RAII helper used to make the target beneath operate on
   the base (CPU) thread's regcache.  */

class temporarily_change_regcache_ptid
{
public:

  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
503
/* Implement the "fetch_registers" target_ops method.  For an active
   ravenscar task, read from the CPU's register bank via the target
   beneath; for a suspended task, read the registers the runtime
   saved, via the arch-specific ops.  */

void
ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task is running: its registers live in the CPU, so
	     fetch under the base thread's ptid.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->fetch_registers (regcache, regnum);
	}
      else
	{
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->fetch_registers (regcache, regnum);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}
529
/* Implement the "store_registers" target_ops method.  Mirror image
   of fetch_registers: write to the CPU bank for the active task, or
   to the runtime's save area for a suspended one.  */

void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task is running: store under the base thread's ptid.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->store_registers (regcache, regnum);
	}
      else
	{
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->store_registers (regcache, regnum);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}
556
557 void
558 ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
559 {
560 ptid_t ptid = regcache->ptid ();
561
562 if (runtime_initialized () && is_ravenscar_task (ptid))
563 {
564 if (task_is_currently_active (ptid))
565 {
566 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
567 temporarily_change_regcache_ptid changer (regcache, base);
568 beneath ()->prepare_to_store (regcache);
569 }
570 else
571 {
572 /* Nothing. */
573 }
574 }
575 else
576 beneath ()->prepare_to_store (regcache);
577 }
578
579 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
580
581 bool
582 ravenscar_thread_target::stopped_by_sw_breakpoint ()
583 {
584 scoped_restore_current_thread saver;
585 set_base_thread_from_ravenscar_task (inferior_ptid);
586 return beneath ()->stopped_by_sw_breakpoint ();
587 }
588
589 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
590
591 bool
592 ravenscar_thread_target::stopped_by_hw_breakpoint ()
593 {
594 scoped_restore_current_thread saver;
595 set_base_thread_from_ravenscar_task (inferior_ptid);
596 return beneath ()->stopped_by_hw_breakpoint ();
597 }
598
599 /* Implement the to_stopped_by_watchpoint target_ops "method". */
600
601 bool
602 ravenscar_thread_target::stopped_by_watchpoint ()
603 {
604 scoped_restore_current_thread saver;
605 set_base_thread_from_ravenscar_task (inferior_ptid);
606 return beneath ()->stopped_by_watchpoint ();
607 }
608
609 /* Implement the to_stopped_data_address target_ops "method". */
610
611 bool
612 ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
613 {
614 scoped_restore_current_thread saver;
615 set_base_thread_from_ravenscar_task (inferior_ptid);
616 return beneath ()->stopped_data_address (addr_p);
617 }
618
/* Implement the "mourn_inferior" target_ops method.  */

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  /* Grab the target beneath before unpushing: unpushing may destroy
     this object (close() does "delete this"), after which our own
     members must not be touched.  */
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}
627
628 /* Implement the to_core_of_thread target_ops "method". */
629
630 int
631 ravenscar_thread_target::core_of_thread (ptid_t ptid)
632 {
633 scoped_restore_current_thread saver;
634 set_base_thread_from_ravenscar_task (inferior_ptid);
635 return beneath ()->core_of_thread (inferior_ptid);
636 }
637
/* Implement the target xfer_partial method.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  /* Temporarily redirect inferior_ptid to the base thread, restored
     on scope exit.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
657
658 /* Observer on inferior_created: push ravenscar thread stratum if needed. */
659
660 static void
661 ravenscar_inferior_created (inferior *inf)
662 {
663 const char *err_msg;
664
665 if (!ravenscar_task_support
666 || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
667 || !has_ravenscar_runtime ())
668 return;
669
670 err_msg = ada_get_tcb_types_info ();
671 if (err_msg != NULL)
672 {
673 warning (_("%s. Task/thread support disabled."), err_msg);
674 return;
675 }
676
677 ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
678 inf->push_target (target_ops_up (rtarget));
679 thread_info *thr = rtarget->add_active_thread ();
680 if (thr != nullptr)
681 switch_to_thread (thr);
682 }
683
/* Implement the "get_ada_task_ptid" target_ops method: build a
   ravenscar task ptid from THREAD.  LWP is ignored here; ravenscar
   task ptids always carry a zero LWP (see is_ravenscar_task).  */

ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
{
  return ptid_t (m_base_ptid.pid (), 0, thread);
}
689
/* Command-lists for the "set/show ravenscar" prefix commands,
   populated in _initialize_ravenscar.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;
693
694 /* Implement the "show ravenscar task-switching" command. */
695
696 static void
697 show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
698 struct cmd_list_element *c,
699 const char *value)
700 {
701 if (ravenscar_task_support)
702 fprintf_filtered (file, _("\
703 Support for Ravenscar task/thread switching is enabled\n"));
704 else
705 fprintf_filtered (file, _("\
706 Support for Ravenscar task/thread switching is disabled\n"));
707 }
708
/* Module startup initialization function, automagically called by
   init.c.  Registers the inferior_created observer and the
   "set/show ravenscar" commands.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  /* "set/show ravenscar" prefix commands.  */
  add_setshow_prefix_cmd
    ("ravenscar", no_class,
     _("Prefix command for changing Ravenscar-specific settings."),
     _("Prefix command for showing Ravenscar-specific settings."),
     &set_ravenscar_list, &show_ravenscar_list,
     &setlist, &showlist);

  /* "set/show ravenscar task-switching", backed by
     ravenscar_task_support.  */
  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}