gdb/testsuite: some additional tests in gdb.tui/scroll.exp
[binutils-gdb.git] / gdb / ravenscar-thread.c
1 /* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
34
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

/* Name of the runtime symbol holding, per CPU, the ID of the thread
   currently running on that CPU (see get_running_thread_id, which
   indexes this table by CPU number).  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Symbols whose presence indicates the GNAT tasking runtime; used by
   has_ravenscar_runtime below.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* Name of the routine that initializes the Ravenscar runtime; its
   presence in the executable is required for this module to engage.  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

/* Description of the "ravenscar" target, returned by the target's
   info method.  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
/* The ravenscar thread target: a thread_stratum target pushed on top
   of a process-stratum target, presenting Ada tasks as threads.  */

struct ravenscar_thread_target final : public target_ops
{
  /* Capture the current inferior_ptid as the initial base (CPU)
     thread.  */
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;

  /* Enable btrace on the CPU thread underlying TP's task, since the
     target beneath only knows about CPU threads.  */
  struct btrace_target_info *enable_btrace (thread_info *tp,
					    const struct btrace_config *conf)
    override
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
    tp = find_thread_ptid (proc_target, underlying);

    return beneath ()->enable_btrace (tp, conf);
  }

  void mourn_inferior () override;

  /* This target is heap-allocated (see ravenscar_inferior_created),
     so it deletes itself on close.  */
  void close () override
  {
    delete this;
  }

  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  /* Return the ptid of the task running on CPU, or null_ptid if the
     runtime reports none.  */
  ptid_t active_task (int cpu);
  /* Return true if the task PTID is the one currently executing on
     its CPU.  */
  bool task_is_currently_active (ptid_t ptid);
  /* Return true if the Ravenscar runtime has been initialized on the
     target.  */
  bool runtime_initialized ();
  /* Return the CPU number on which PTID (task or CPU thread) runs.  */
  int get_thread_base_cpu (ptid_t ptid);
  /* Return the CPU thread's ptid underlying the task PTID.  */
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  /* Add a thread for TASK to the thread list, if not already there.  */
  void add_thread (struct ada_task_info *task);

  /* Like switch_to_thread, but uses the base ptid for the thread.  */
  void set_base_thread_from_ravenscar_task (ptid_t ptid)
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
    switch_to_thread (find_thread_ptid (proc_target, underlying));
  }

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<ULONGEST, int> m_cpu_map;
};
173
174 /* Return true iff PTID corresponds to a ravenscar task. */
175
176 static bool
177 is_ravenscar_task (ptid_t ptid)
178 {
179 /* By construction, ravenscar tasks have their LWP set to zero.
180 Also make sure that the TID is nonzero, as some remotes, when
181 asked for the list of threads, will return the first thread
182 as having its TID set to zero. For instance, TSIM version
183 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
184 query, which the remote protocol layer then treats as a thread
185 whose TID is 0. This is obviously not a ravenscar task. */
186 return ptid.lwp () == 0 && ptid.tid () != 0;
187 }
188
189 /* Given PTID, which can be either a ravenscar task or a CPU thread,
190 return which CPU that ptid is running on.
191
192 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
193 will be triggered. */
194
195 int
196 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
197 {
198 int base_cpu;
199
200 if (is_ravenscar_task (ptid))
201 {
202 /* Prefer to not read inferior memory if possible, to avoid
203 reentrancy problems with xfer_partial. */
204 auto iter = m_cpu_map.find (ptid.tid ());
205
206 if (iter != m_cpu_map.end ())
207 base_cpu = iter->second;
208 else
209 {
210 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
211
212 gdb_assert (task_info != NULL);
213 base_cpu = task_info->base_cpu;
214 }
215 }
216 else
217 {
218 /* We assume that the LWP of the PTID is equal to the CPU number. */
219 base_cpu = ptid.lwp ();
220 }
221
222 return base_cpu;
223 }
224
225 /* Given a ravenscar task (identified by its ptid_t PTID), return true
226 if this task is the currently active task on the cpu that task is
227 running on.
228
229 In other words, this function determine which CPU this task is
230 currently running on, and then return nonzero if the CPU in question
231 is executing the code for that task. If that's the case, then
232 that task's registers are in the CPU bank. Otherwise, the task
233 is currently suspended, and its registers have been saved in memory. */
234
235 bool
236 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
237 {
238 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
239
240 return ptid == active_task_ptid;
241 }
242
243 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
244 task is running.
245
246 This is the thread that corresponds to the CPU on which the task
247 is running. */
248
249 ptid_t
250 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
251 {
252 int base_cpu;
253
254 if (!is_ravenscar_task (ptid))
255 return ptid;
256
257 base_cpu = get_thread_base_cpu (ptid);
258 return ptid_t (ptid.pid (), base_cpu);
259 }
260
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  /* m_base_ptid must always name a CPU thread, never a task.  */
  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* Make sure we set m_base_ptid before calling active_task
     as the latter relies on it.  (NOTE(review): m_base_ptid is not
     assigned in this function -- it is set by our callers, e.g.
     wait() -- so this reads as a precondition on the caller rather
     than something done here; confirm against the call sites.)  */
  ptid_t active_ptid = active_task (base_cpu);
  gdb_assert (active_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Remember the task's CPU so get_thread_base_cpu can answer
	 later without reading inferior memory.  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
295
296 /* The Ravenscar Runtime exports a symbol which contains the ID of
297 the thread that is currently running. Try to locate that symbol
298 and return its associated minimal symbol.
299 Return NULL if not found. */
300
301 static struct bound_minimal_symbol
302 get_running_thread_msymbol ()
303 {
304 struct bound_minimal_symbol msym;
305
306 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
307 if (!msym.minsym)
308 /* Older versions of the GNAT runtime were using a different
309 (less ideal) name for the symbol where the active thread ID
310 is stored. If we couldn't find the symbol using the latest
311 name, then try the old one. */
312 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
313
314 return msym;
315 }
316
317 /* Return True if the Ada Ravenscar run-time can be found in the
318 application. */
319
320 static bool
321 has_ravenscar_runtime ()
322 {
323 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
324 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
325 struct bound_minimal_symbol msym_known_tasks
326 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
327 struct bound_minimal_symbol msym_first_task
328 = lookup_minimal_symbol (first_task_name, NULL, NULL);
329 struct bound_minimal_symbol msym_running_thread
330 = get_running_thread_msymbol ();
331
332 return (msym_ravenscar_runtime_initializer.minsym
333 && (msym_known_tasks.minsym || msym_first_task.minsym)
334 && msym_running_thread.minsym);
335 }
336
337 /* Return True if the Ada Ravenscar run-time can be found in the
338 application, and if it has been initialized on target. */
339
340 bool
341 ravenscar_thread_target::runtime_initialized ()
342 {
343 return active_task (1) != null_ptid;
344 }
345
346 /* Return the ID of the thread that is currently running.
347 Return 0 if the ID could not be determined. */
348
349 static CORE_ADDR
350 get_running_thread_id (int cpu)
351 {
352 struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
353 int object_size;
354 int buf_size;
355 gdb_byte *buf;
356 CORE_ADDR object_addr;
357 struct type *builtin_type_void_data_ptr
358 = builtin_type (target_gdbarch ())->builtin_data_ptr;
359
360 if (!object_msym.minsym)
361 return 0;
362
363 object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
364 object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
365 + (cpu - 1) * object_size);
366 buf_size = object_size;
367 buf = (gdb_byte *) alloca (buf_size);
368 read_memory (object_addr, buf, buf_size);
369 return extract_typed_address (buf, builtin_type_void_data_ptr);
370 }
371
372 void
373 ravenscar_thread_target::resume (ptid_t ptid, int step,
374 enum gdb_signal siggnal)
375 {
376 /* If we see a wildcard resume, we simply pass that on. Otherwise,
377 arrange to resume the base ptid. */
378 inferior_ptid = m_base_ptid;
379 if (ptid.is_pid ())
380 {
381 /* We only have one process, so resume all threads of it. */
382 ptid = minus_one_ptid;
383 }
384 else if (ptid != minus_one_ptid)
385 ptid = m_base_ptid;
386 beneath ()->resume (ptid, step, siggnal);
387 }
388
/* Implement the to_wait target_ops "method".  Waits on the base (CPU)
   thread beneath and, when possible, returns the ptid of the active
   ravenscar task instead of the raw event ptid.  */

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  /* Waiting on a specific ravenscar task really means waiting on its
     base CPU thread.  */
  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  /* NOTE(review): OPTIONS is not forwarded -- 0 is passed instead, so
     flags such as TARGET_WNOHANG never reach the target beneath;
     confirm whether dropping them is intentional (the code below
     assumes a real stop event was returned).  */
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind () != TARGET_WAITKIND_EXITED
      && status->kind () != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      /* Remember which CPU thread received the event; active_task and
	 update_thread_list rely on m_base_ptid.  */
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      return this->add_active_thread ()->ptid;
    }
  return event_ptid;
}
418
419 /* Add the thread associated to the given TASK to the thread list
420 (if the thread has already been added, this is a no-op). */
421
422 void
423 ravenscar_thread_target::add_thread (struct ada_task_info *task)
424 {
425 if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
426 {
427 ::add_thread (current_inferior ()->process_target (), task->ptid);
428 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
429 }
430 }
431
432 void
433 ravenscar_thread_target::update_thread_list ()
434 {
435 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
436 but this isn't always the case in target methods. So, we ensure
437 it here. */
438 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
439 m_base_ptid);
440
441 /* Do not clear the thread list before adding the Ada task, to keep
442 the thread that the process stratum has included into it
443 (m_base_ptid) and the running thread, that may not have been included
444 to system.tasking.debug's list yet. */
445
446 iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
447 {
448 this->add_thread (task);
449 });
450 }
451
452 ptid_t
453 ravenscar_thread_target::active_task (int cpu)
454 {
455 CORE_ADDR tid = get_running_thread_id (cpu);
456
457 if (tid == 0)
458 return null_ptid;
459 else
460 return ptid_t (m_base_ptid.pid (), 0, tid);
461 }
462
463 bool
464 ravenscar_thread_target::thread_alive (ptid_t ptid)
465 {
466 /* Ravenscar tasks are non-terminating. */
467 return true;
468 }
469
470 std::string
471 ravenscar_thread_target::pid_to_str (ptid_t ptid)
472 {
473 if (!is_ravenscar_task (ptid))
474 return beneath ()->pid_to_str (ptid);
475
476 return string_printf ("Ravenscar Thread 0x%s",
477 phex_nz (ptid.tid (), sizeof (ULONGEST)));
478 }
479
/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  RAII helper used by fetch/store/prepare_to_store to
   redirect a task's register access to its base CPU thread.  */

class temporarily_change_regcache_ptid
{
public:

  /* Associate REGCACHE with NEW_PTID for the lifetime of this
     object.  */
  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  /* Restore the ptid saved at construction time.  */
  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
507
508 void
509 ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
510 {
511 ptid_t ptid = regcache->ptid ();
512
513 if (runtime_initialized () && is_ravenscar_task (ptid))
514 {
515 if (task_is_currently_active (ptid))
516 {
517 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
518 temporarily_change_regcache_ptid changer (regcache, base);
519 beneath ()->fetch_registers (regcache, regnum);
520 }
521 else
522 {
523 struct gdbarch *gdbarch = regcache->arch ();
524 struct ravenscar_arch_ops *arch_ops
525 = gdbarch_ravenscar_ops (gdbarch);
526
527 arch_ops->fetch_registers (regcache, regnum);
528 }
529 }
530 else
531 beneath ()->fetch_registers (regcache, regnum);
532 }
533
534 void
535 ravenscar_thread_target::store_registers (struct regcache *regcache,
536 int regnum)
537 {
538 ptid_t ptid = regcache->ptid ();
539
540 if (runtime_initialized () && is_ravenscar_task (ptid))
541 {
542 if (task_is_currently_active (ptid))
543 {
544 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
545 temporarily_change_regcache_ptid changer (regcache, base);
546 beneath ()->store_registers (regcache, regnum);
547 }
548 else
549 {
550 struct gdbarch *gdbarch = regcache->arch ();
551 struct ravenscar_arch_ops *arch_ops
552 = gdbarch_ravenscar_ops (gdbarch);
553
554 arch_ops->store_registers (regcache, regnum);
555 }
556 }
557 else
558 beneath ()->store_registers (regcache, regnum);
559 }
560
561 void
562 ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
563 {
564 ptid_t ptid = regcache->ptid ();
565
566 if (runtime_initialized () && is_ravenscar_task (ptid))
567 {
568 if (task_is_currently_active (ptid))
569 {
570 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
571 temporarily_change_regcache_ptid changer (regcache, base);
572 beneath ()->prepare_to_store (regcache);
573 }
574 else
575 {
576 /* Nothing. */
577 }
578 }
579 else
580 beneath ()->prepare_to_store (regcache);
581 }
582
583 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
584
585 bool
586 ravenscar_thread_target::stopped_by_sw_breakpoint ()
587 {
588 scoped_restore_current_thread saver;
589 set_base_thread_from_ravenscar_task (inferior_ptid);
590 return beneath ()->stopped_by_sw_breakpoint ();
591 }
592
593 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
594
595 bool
596 ravenscar_thread_target::stopped_by_hw_breakpoint ()
597 {
598 scoped_restore_current_thread saver;
599 set_base_thread_from_ravenscar_task (inferior_ptid);
600 return beneath ()->stopped_by_hw_breakpoint ();
601 }
602
603 /* Implement the to_stopped_by_watchpoint target_ops "method". */
604
605 bool
606 ravenscar_thread_target::stopped_by_watchpoint ()
607 {
608 scoped_restore_current_thread saver;
609 set_base_thread_from_ravenscar_task (inferior_ptid);
610 return beneath ()->stopped_by_watchpoint ();
611 }
612
613 /* Implement the to_stopped_data_address target_ops "method". */
614
615 bool
616 ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
617 {
618 scoped_restore_current_thread saver;
619 set_base_thread_from_ravenscar_task (inferior_ptid);
620 return beneath ()->stopped_data_address (addr_p);
621 }
622
/* Implement the to_mourn_inferior target_ops "method".  */

void
ravenscar_thread_target::mourn_inferior ()
{
  /* The inferior is gone; forget its base thread.  */
  m_base_ptid = null_ptid;
  /* Save the beneath pointer first: unpush_target drops the last
     reference to this target, which deletes it (see close ()), so
     "this" must not be used afterwards.  */
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}
631
632 /* Implement the to_core_of_thread target_ops "method". */
633
634 int
635 ravenscar_thread_target::core_of_thread (ptid_t ptid)
636 {
637 scoped_restore_current_thread saver;
638 set_base_thread_from_ravenscar_task (inferior_ptid);
639 return beneath ()->core_of_thread (inferior_ptid);
640 }
641
/* Implement the target xfer_partial method.  Memory transfers are
   issued on behalf of the base CPU thread, not the ravenscar task.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  /* inferior_ptid is restored when SAVE_PTID goes out of scope.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
661
662 /* Observer on inferior_created: push ravenscar thread stratum if needed. */
663
664 static void
665 ravenscar_inferior_created (inferior *inf)
666 {
667 const char *err_msg;
668
669 if (!ravenscar_task_support
670 || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
671 || !has_ravenscar_runtime ())
672 return;
673
674 err_msg = ada_get_tcb_types_info ();
675 if (err_msg != NULL)
676 {
677 warning (_("%s. Task/thread support disabled."), err_msg);
678 return;
679 }
680
681 ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
682 inf->push_target (target_ops_up (rtarget));
683 thread_info *thr = rtarget->add_active_thread ();
684 if (thr != nullptr)
685 switch_to_thread (thr);
686 }
687
688 ptid_t
689 ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
690 {
691 return ptid_t (m_base_ptid.pid (), 0, thread);
692 }
693
694 /* Command-list for the "set/show ravenscar" prefix command. */
695 static struct cmd_list_element *set_ravenscar_list;
696 static struct cmd_list_element *show_ravenscar_list;
697
698 /* Implement the "show ravenscar task-switching" command. */
699
700 static void
701 show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
702 struct cmd_list_element *c,
703 const char *value)
704 {
705 if (ravenscar_task_support)
706 gdb_printf (file, _("\
707 Support for Ravenscar task/thread switching is enabled\n"));
708 else
709 gdb_printf (file, _("\
710 Support for Ravenscar task/thread switching is disabled\n"));
711 }
712
/* Module startup initialization function, automagically called by
   init.c.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  /* Register the "set ravenscar" / "show ravenscar" prefix commands
     under which the setting below lives.  */
  add_setshow_prefix_cmd
    ("ravenscar", no_class,
     _("Prefix command for changing Ravenscar-specific settings."),
     _("Prefix command for showing Ravenscar-specific settings."),
     &set_ravenscar_list, &show_ravenscar_list,
     &setlist, &showlist);

  /* "set/show ravenscar task-switching" toggles
     ravenscar_task_support, consulted by ravenscar_inferior_created.  */
  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}