/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
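
/* Example (illustrative): the variable above follows the corresponding
   CLI setting, e.g.

     (gdb) set record btrace replay-memory-access read-write

   which permits memory writes while replaying; the default "read-only"
   rejects them (see record_btrace_target::xfer_partial below).  */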

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)					\
  do								\
    {								\
      if (record_debug != 0)					\
	fprintf_unfiltered (gdb_stdlog,				\
			    "[record-btrace] " msg "\n", ##args); \
    }								\
  while (0)

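/* For illustration: with record debugging enabled ("set debug record 1"),
   DEBUG ("open") would print

     [record-btrace] open

   to gdb_stdlog; with record_debug at zero the macro expands to a no-op
   statement.  */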

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
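
/* Example (illustrative): the state above follows the "set record btrace
   cpu" command, roughly

     (gdb) set record btrace cpu none          <- CS_NONE: no errata workarounds
     (gdb) set record btrace cpu auto          <- CS_AUTO: cpu of the recording
     (gdb) set record btrace cpu <identifier>  <- CS_CPU: decode for that cpu

   See the command's help text for the exact cpu identifier syntax.  */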

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
					 format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
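
/* Usage sketch (this mirrors record_btrace_target_open below): register
   each thread once tracing was enabled for it, and discard the guard
   only after everything that can throw has succeeded:

     scoped_btrace_disable disable;
     btrace_enable (tp, &record_btrace_conf);
     disable.add_thread (tp);
     ...                      // may throw; the dtor disables btrace again
     disable.discard ();      // commit: keep btrace enabled  */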

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
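
/* For example, *SIZE == 2097152 becomes 2 with suffix "MB" and
   *SIZE == 4096 becomes 4 with suffix "kB", while 1536 is not an exact
   multiple of 1kB and is left unchanged with an empty suffix.  */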

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
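
/* For example, adding line 42 to an empty range yields BEGIN == 42,
   END == 43.  Adding 40 to that range lowers BEGIN to 40; adding 50
   raises END to 50 itself (not one past it), per the comparisons
   above.  */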

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
   source line and the instructions corresponding to that source line.
   When printing a new source line, we close the currently open emitters
   and open new ones for it.  If the source line range in LINES is not
   empty, this function leaves the emitters for the last printed source
   line open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
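
/* For example, FROM == 10 with SIZE == -3 resolves to the inclusive
   range [8; 10] handed to insn_history_range above, while SIZE == 3
   resolves to [10; 12].  */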

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
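
/* In effect, while replaying with the default "read-only" setting, reads
   from SEC_READONLY sections such as .text are forwarded to the target
   beneath, whereas writes, and reads of writable memory such as the
   stack, report TARGET_XFER_UNAVAILABLE.  */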

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
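
/* Both functions key the table on the frame_info pointer itself, so an
   entry is found again only for the identical frame object.  This is
   what lets btrace_get_frame_function below map a frame back to its
   branch trace function segment.  */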

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
/* btrace recording does not store previous memory content, nor the stack
   frame contents.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we have started replaying.  */
2035 try
2036 {
2037 struct frame_id frame_id;
2038 int upd_step_frame_id, upd_step_stack_frame_id;
2039
2040 /* The current frame without replaying - computed via normal unwind. */
2041 frame_id = get_thread_current_frame_id (tp);
2042
2043 /* Check if we need to update any stepping-related frame id's. */
2044 upd_step_frame_id = frame_id_eq (frame_id,
2045 tp->control.step_frame_id);
2046 upd_step_stack_frame_id = frame_id_eq (frame_id,
2047 tp->control.step_stack_frame_id);
2048
2049 /* We start replaying at the end of the branch trace. This corresponds
2050 to the current instruction. */
2051 replay = XNEW (struct btrace_insn_iterator);
2052 btrace_insn_end (replay, btinfo);
2053
2054 /* Skip gaps at the end of the trace. */
2055 while (btrace_insn_get (replay) == NULL)
2056 {
2057 unsigned int steps;
2058
2059 steps = btrace_insn_prev (replay, 1);
2060 if (steps == 0)
2061 error (_("No trace."));
2062 }
2063
2064 /* We're not replaying yet. */
2065 gdb_assert (btinfo->replay == NULL);
2066 btinfo->replay = replay;
2067
2068 /* Make sure we're not using any stale registers. */
2069 registers_changed_thread (tp);
2070
2071 /* The current frame with replaying - computed via btrace unwind. */
2072 frame_id = get_thread_current_frame_id (tp);
2073
2074 /* Replace stepping related frames where necessary. */
2075 if (upd_step_frame_id)
2076 tp->control.step_frame_id = frame_id;
2077 if (upd_step_stack_frame_id)
2078 tp->control.step_stack_frame_id = frame_id;
2079 }
2080 catch (const gdb_exception &except)
2081 {
2082 xfree (btinfo->replay);
2083 btinfo->replay = NULL;
2084
2085 registers_changed_thread (tp);
2086
2087 throw;
2088 }
2089
2090 return replay;
2091 }
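/* An illustrative scenario for the frame id fixup above: suppose the user
   runs "reverse-next" inside function F.  infrun has stored F's frame id,
   computed by the normal unwinder, in TP->CONTROL.STEP_FRAME_ID.  Once
   replaying starts, the btrace unwinder yields a different id for the
   very same frame, so without the replacement infrun would mistake the
   first reverse step for a step into a subroutine.  */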
2092
2093 /* Stop replaying a thread. */
2094
2095 static void
2096 record_btrace_stop_replaying (struct thread_info *tp)
2097 {
2098 struct btrace_thread_info *btinfo;
2099
2100 btinfo = &tp->btrace;
2101
2102 xfree (btinfo->replay);
2103 btinfo->replay = NULL;
2104
2105 /* Make sure we're not leaving any stale registers. */
2106 registers_changed_thread (tp);
2107 }
2108
2109 /* Stop replaying TP if it is at the end of its execution history. */
2110
2111 static void
2112 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2113 {
2114 struct btrace_insn_iterator *replay, end;
2115 struct btrace_thread_info *btinfo;
2116
2117 btinfo = &tp->btrace;
2118 replay = btinfo->replay;
2119
2120 if (replay == NULL)
2121 return;
2122
2123 btrace_insn_end (&end, btinfo);
2124
2125 if (btrace_insn_cmp (replay, &end) == 0)
2126 record_btrace_stop_replaying (tp);
2127 }
2128
2129 /* The resume method of target record-btrace. */
2130
2131 void
2132 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2133 {
2134 enum btrace_thread_flag flag, cflag;
2135
2136 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2137 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2138 step ? "step" : "cont");
2139
2140 /* Store the execution direction of the last resume.
2141
2142 If there is more than one resume call, we have to rely on infrun
2143 to not change the execution direction in-between. */
2144 record_btrace_resume_exec_dir = ::execution_direction;
2145
2146 /* As long as we're not replaying, just forward the request.
2147
2148 For non-stop targets this means that no thread is replaying. In order to
2149 make progress, we may need to explicitly move replaying threads to the end
2150 of their execution history. */
2151 if ((::execution_direction != EXEC_REVERSE)
2152 && !record_is_replaying (minus_one_ptid))
2153 {
2154 this->beneath ()->resume (ptid, step, signal);
2155 return;
2156 }
2157
2158 /* Compute the btrace thread flag for the requested move. */
2159 if (::execution_direction == EXEC_REVERSE)
2160 {
2161 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2162 cflag = BTHR_RCONT;
2163 }
2164 else
2165 {
2166 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2167 cflag = BTHR_CONT;
2168 }
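/* The mapping above, spelled out: forward + step yields BTHR_STEP,
   forward + continue yields BTHR_CONT, reverse + step yields BTHR_RSTEP,
   and reverse + continue yields BTHR_RCONT.  CFLAG is the continue
   variant applied to the threads that are resumed but not stepped under
   all-stop (see below).  */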
2169
2170 /* We just indicate the resume intent here. The actual stepping happens in
2171 record_btrace_wait below.
2172
2173 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2174
2175 process_stratum_target *proc_target = current_inferior ()->process_target ();
2176
2177 if (!target_is_non_stop_p ())
2178 {
2179 gdb_assert (inferior_ptid.matches (ptid));
2180
2181 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2182 {
2183 if (tp->ptid.matches (inferior_ptid))
2184 record_btrace_resume_thread (tp, flag);
2185 else
2186 record_btrace_resume_thread (tp, cflag);
2187 }
2188 }
2189 else
2190 {
2191 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2192 record_btrace_resume_thread (tp, flag);
2193 }
2194
2195 /* Async support. */
2196 if (target_can_async_p ())
2197 {
2198 target_async (1);
2199 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2200 }
2201 }
2202
2203 /* The commit_resume method of target record-btrace. */
2204
2205 void
2206 record_btrace_target::commit_resume ()
2207 {
2208 if ((::execution_direction != EXEC_REVERSE)
2209 && !record_is_replaying (minus_one_ptid))
2210 beneath ()->commit_resume ();
2211 }
2212
2213 /* Cancel resuming TP. */
2214
2215 static void
2216 record_btrace_cancel_resume (struct thread_info *tp)
2217 {
2218 enum btrace_thread_flag flags;
2219
2220 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2221 if (flags == 0)
2222 return;
2223
2224 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2225 print_thread_id (tp),
2226 target_pid_to_str (tp->ptid).c_str (), flags,
2227 btrace_thread_flag_to_str (flags));
2228
2229 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2230 record_btrace_stop_replaying_at_end (tp);
2231 }
2232
2233 /* Return a target_waitstatus indicating that we ran out of history. */
2234
2235 static struct target_waitstatus
2236 btrace_step_no_history (void)
2237 {
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_NO_HISTORY;
2241
2242 return status;
2243 }
2244
2245 /* Return a target_waitstatus indicating that a step finished. */
2246
2247 static struct target_waitstatus
2248 btrace_step_stopped (void)
2249 {
2250 struct target_waitstatus status;
2251
2252 status.kind = TARGET_WAITKIND_STOPPED;
2253 status.value.sig = GDB_SIGNAL_TRAP;
2254
2255 return status;
2256 }
2257
2258 /* Return a target_waitstatus indicating that a thread was stopped as
2259 requested. */
2260
2261 static struct target_waitstatus
2262 btrace_step_stopped_on_request (void)
2263 {
2264 struct target_waitstatus status;
2265
2266 status.kind = TARGET_WAITKIND_STOPPED;
2267 status.value.sig = GDB_SIGNAL_0;
2268
2269 return status;
2270 }
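/* Note the difference between the two stop helpers above: a finished step
   reports GDB_SIGNAL_TRAP, the signal infrun expects for a completed step
   or a breakpoint hit, whereas a stop on request reports GDB_SIGNAL_0,
   i.e. no signal at all.  */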
2271
2272 /* Return a target_waitstatus indicating a spurious stop. */
2273
2274 static struct target_waitstatus
2275 btrace_step_spurious (void)
2276 {
2277 struct target_waitstatus status;
2278
2279 status.kind = TARGET_WAITKIND_SPURIOUS;
2280
2281 return status;
2282 }
2283
2284 /* Return a target_waitstatus indicating that the thread was not resumed. */
2285
2286 static struct target_waitstatus
2287 btrace_step_no_resumed (void)
2288 {
2289 struct target_waitstatus status;
2290
2291 status.kind = TARGET_WAITKIND_NO_RESUMED;
2292
2293 return status;
2294 }
2295
2296 /* Return a target_waitstatus indicating that we should wait again. */
2297
2298 static struct target_waitstatus
2299 btrace_step_again (void)
2300 {
2301 struct target_waitstatus status;
2302
2303 status.kind = TARGET_WAITKIND_IGNORE;
2304
2305 return status;
2306 }
2307
2308 /* Clear the record histories. */
2309
2310 static void
2311 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2312 {
2313 xfree (btinfo->insn_history);
2314 xfree (btinfo->call_history);
2315
2316 btinfo->insn_history = NULL;
2317 btinfo->call_history = NULL;
2318 }
2319
2320 /* Check whether TP's current replay position is at a breakpoint. */
2321
2322 static int
2323 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2324 {
2325 struct btrace_insn_iterator *replay;
2326 struct btrace_thread_info *btinfo;
2327 const struct btrace_insn *insn;
2328
2329 btinfo = &tp->btrace;
2330 replay = btinfo->replay;
2331
2332 if (replay == NULL)
2333 return 0;
2334
2335 insn = btrace_insn_get (replay);
2336 if (insn == NULL)
2337 return 0;
2338
2339 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2340 &btinfo->stop_reason);
2341 }
2342
2343 /* Step one instruction in forward direction. */
2344
2345 static struct target_waitstatus
2346 record_btrace_single_step_forward (struct thread_info *tp)
2347 {
2348 struct btrace_insn_iterator *replay, end, start;
2349 struct btrace_thread_info *btinfo;
2350
2351 btinfo = &tp->btrace;
2352 replay = btinfo->replay;
2353
2354 /* We're done if we're not replaying. */
2355 if (replay == NULL)
2356 return btrace_step_no_history ();
2357
2358 /* Check if we're stepping a breakpoint. */
2359 if (record_btrace_replay_at_breakpoint (tp))
2360 return btrace_step_stopped ();
2361
2362 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2363 jump back to the instruction at which we started. */
2364 start = *replay;
2365 do
2366 {
2367 unsigned int steps;
2368
2369 /* We will bail out here if we continue stepping after reaching the end
2370 of the execution history. */
2371 steps = btrace_insn_next (replay, 1);
2372 if (steps == 0)
2373 {
2374 *replay = start;
2375 return btrace_step_no_history ();
2376 }
2377 }
2378 while (btrace_insn_get (replay) == NULL);
2379
2380 /* Determine the end of the instruction trace. */
2381 btrace_insn_end (&end, btinfo);
2382
2383 /* The execution trace contains (and ends with) the current instruction.
2384 This instruction has not been executed yet, so the trace really ends
2385 one instruction earlier. */
2386 if (btrace_insn_cmp (replay, &end) == 0)
2387 return btrace_step_no_history ();
2388
2389 return btrace_step_spurious ();
2390 }
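/* A short worked example for the function above: assume the recorded
   trace is [I1, I2, I3], where I3 is the current, not-yet-executed
   instruction.  Replaying at I1, a forward step moves to I2 and returns
   a spurious status, which record_btrace_step_thread below turns into a
   SIGTRAP stop (for BTHR_STEP) or into another iteration (for
   BTHR_CONT).  Replaying at I2, the step moves to I3; since I3 equals
   the end iterator, we report that we ran out of history instead.  */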
2391
2392 /* Step one instruction in backward direction. */
2393
2394 static struct target_waitstatus
2395 record_btrace_single_step_backward (struct thread_info *tp)
2396 {
2397 struct btrace_insn_iterator *replay, start;
2398 struct btrace_thread_info *btinfo;
2399
2400 btinfo = &tp->btrace;
2401 replay = btinfo->replay;
2402
2403 /* Start replaying if we're not already doing so. */
2404 if (replay == NULL)
2405 replay = record_btrace_start_replaying (tp);
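/* REPLAY should not be NULL at this point: record_btrace_step_thread
   refuses to move a thread without execution history, and
   record_btrace_start_replaying only returns NULL for an empty trace.  */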
2406
2407 /* If we can't step any further back, we reached the beginning of the
2408 history. Skip gaps during replay. If we end up at a gap (at the
2409 beginning of the trace), jump back to the instruction at which we started. */
2410 start = *replay;
2411 do
2412 {
2413 unsigned int steps;
2414
2415 steps = btrace_insn_prev (replay, 1);
2416 if (steps == 0)
2417 {
2418 *replay = start;
2419 return btrace_step_no_history ();
2420 }
2421 }
2422 while (btrace_insn_get (replay) == NULL);
2423
2424 /* Check if we're stepping a breakpoint.
2425
2426 For reverse-stepping, this check is after the step. There is logic in
2427 infrun.c that handles reverse-stepping separately. See, for example,
2428 proceed and adjust_pc_after_break.
2429
2430 This code assumes that for reverse-stepping, PC points to the last
2431 de-executed instruction, whereas for forward-stepping PC points to the
2432 next to-be-executed instruction. */
2433 if (record_btrace_replay_at_breakpoint (tp))
2434 return btrace_step_stopped ();
2435
2436 return btrace_step_spurious ();
2437 }
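/* Continuing the example from the forward case: with a trace
   [I1, I2, I3] and a breakpoint on I2, a reverse step from I3 moves the
   replay position to I2, the last de-executed instruction, and the
   breakpoint check above fires, reporting a SIGTRAP stop.  */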
2438
2439 /* Step a single thread. */
2440
2441 static struct target_waitstatus
2442 record_btrace_step_thread (struct thread_info *tp)
2443 {
2444 struct btrace_thread_info *btinfo;
2445 struct target_waitstatus status;
2446 enum btrace_thread_flag flags;
2447
2448 btinfo = &tp->btrace;
2449
2450 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2451 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2452
2453 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2454 target_pid_to_str (tp->ptid).c_str (), flags,
2455 btrace_thread_flag_to_str (flags));
2456
2457 /* We can't step without an execution history. */
2458 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2459 return btrace_step_no_history ();
2460
2461 switch (flags)
2462 {
2463 default:
2464 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2465
2466 case BTHR_STOP:
2467 return btrace_step_stopped_on_request ();
2468
2469 case BTHR_STEP:
2470 status = record_btrace_single_step_forward (tp);
2471 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2472 break;
2473
2474 return btrace_step_stopped ();
2475
2476 case BTHR_RSTEP:
2477 status = record_btrace_single_step_backward (tp);
2478 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2479 break;
2480
2481 return btrace_step_stopped ();
2482
2483 case BTHR_CONT:
2484 status = record_btrace_single_step_forward (tp);
2485 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2486 break;
2487
2488 btinfo->flags |= flags;
2489 return btrace_step_again ();
2490
2491 case BTHR_RCONT:
2492 status = record_btrace_single_step_backward (tp);
2493 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2494 break;
2495
2496 btinfo->flags |= flags;
2497 return btrace_step_again ();
2498 }
2499
2500 /* We keep threads moving at the end of their execution history. The wait
2501 method will stop the thread for which the event is reported. */
2502 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2503 btinfo->flags |= flags;
2504
2505 return status;
2506 }
2507
2508 /* Announce further events if necessary. */
2509
2510 static void
2511 record_btrace_maybe_mark_async_event
2512 (const std::vector<thread_info *> &moving,
2513 const std::vector<thread_info *> &no_history)
2514 {
2515 bool more_moving = !moving.empty ();
2516 bool more_no_history = !no_history.empty ();
2517
2518 if (!more_moving && !more_no_history)
2519 return;
2520
2521 if (more_moving)
2522 DEBUG ("movers pending");
2523
2524 if (more_no_history)
2525 DEBUG ("no-history pending");
2526
2527 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2528 }
2529
2530 /* The wait method of target record-btrace. */
2531
2532 ptid_t
2533 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2534 int options)
2535 {
2536 std::vector<thread_info *> moving;
2537 std::vector<thread_info *> no_history;
2538
2539 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2540
2541 /* As long as we're not replaying, just forward the request. */
2542 if ((::execution_direction != EXEC_REVERSE)
2543 && !record_is_replaying (minus_one_ptid))
2544 {
2545 return this->beneath ()->wait (ptid, status, options);
2546 }
2547
2548 /* Keep a work list of moving threads. */
2549 process_stratum_target *proc_target = current_inferior ()->process_target ();
2550 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2551 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2552 moving.push_back (tp);
2553
2554 if (moving.empty ())
2555 {
2556 *status = btrace_step_no_resumed ();
2557
2558 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2559 target_waitstatus_to_string (status).c_str ());
2560
2561 return null_ptid;
2562 }
2563
2564 /* Step moving threads one by one, one step each, until either one thread
2565 reports an event or we run out of threads to step.
2566
2567 When stepping more than one thread, chances are that some threads reach
2568 the end of their execution history earlier than others. If we reported
2569 this immediately, all-stop on top of non-stop would stop all threads and
2570 resume the same threads next time. And we would report the same thread
2571 having reached the end of its execution history again.
2572
2573 In the worst case, this would starve the other threads. But even if other
2574 threads would be allowed to make progress, this would result in far too
2575 many intermediate stops.
2576
2577 We therefore delay the reporting of "no execution history" until we have
2578 nothing else to report. By this time, all threads should have moved to
2579 either the beginning or the end of their execution history. There will
2580 be a single user-visible stop. */
2581 struct thread_info *eventing = NULL;
2582 while ((eventing == NULL) && !moving.empty ())
2583 {
2584 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2585 {
2586 thread_info *tp = moving[ix];
2587
2588 *status = record_btrace_step_thread (tp);
2589
2590 switch (status->kind)
2591 {
2592 case TARGET_WAITKIND_IGNORE:
2593 ix++;
2594 break;
2595
2596 case TARGET_WAITKIND_NO_HISTORY:
2597 no_history.push_back (ordered_remove (moving, ix));
2598 break;
2599
2600 default:
2601 eventing = unordered_remove (moving, ix);
2602 break;
2603 }
2604 }
2605 }
2606
2607 if (eventing == NULL)
2608 {
2609 /* We started with at least one moving thread. Each such thread must
2610 have either stopped or reached the end of its execution history.
2611
2612 In the former case, EVENTING would not be NULL. Since it is NULL,
2613 every mover ran out of history, so NO_HISTORY must not be empty. */
2614 gdb_assert (!no_history.empty ());
2615
2616 /* We kept threads moving at the end of their execution history. Stop
2617 EVENTING now that we are going to report its stop. */
2618 eventing = unordered_remove (no_history, 0);
2619 eventing->btrace.flags &= ~BTHR_MOVE;
2620
2621 *status = btrace_step_no_history ();
2622 }
2623
2624 gdb_assert (eventing != NULL);
2625
2626 /* We kept threads replaying at the end of their execution history. Stop
2627 replaying EVENTING now that we are going to report its stop. */
2628 record_btrace_stop_replaying_at_end (eventing);
2629
2630 /* Stop all other threads. */
2631 if (!target_is_non_stop_p ())
2632 {
2633 for (thread_info *tp : all_non_exited_threads ())
2634 record_btrace_cancel_resume (tp);
2635 }
2636
2637 /* In async mode, we need to announce further events. */
2638 if (target_is_async_p ())
2639 record_btrace_maybe_mark_async_event (moving, no_history);
2640
2641 /* Start record histories anew from the current position. */
2642 record_btrace_clear_histories (&eventing->btrace);
2643
2644 /* We moved the replay position but did not update registers. */
2645 registers_changed_thread (eventing);
2646
2647 DEBUG ("wait ended by thread %s (%s): %s",
2648 print_thread_id (eventing),
2649 target_pid_to_str (eventing->ptid).c_str (),
2650 target_waitstatus_to_string (status).c_str ());
2651
2652 return eventing->ptid;
2653 }
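/* An illustrative all-stop scenario for the wait method above: with two
   threads reverse-continuing, both are stepped in round-robin fashion.
   If one reaches the beginning of its history first, it is parked on the
   NO_HISTORY list while the other keeps moving; once every mover is
   parked, we report TARGET_WAITKIND_NO_HISTORY for one of them and
   cancel the resume requests of the remaining threads.  */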
2654
2655 /* The stop method of target record-btrace. */
2656
2657 void
2658 record_btrace_target::stop (ptid_t ptid)
2659 {
2660 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2661
2662 /* As long as we're not replaying, just forward the request. */
2663 if ((::execution_direction != EXEC_REVERSE)
2664 && !record_is_replaying (minus_one_ptid))
2665 {
2666 this->beneath ()->stop (ptid);
2667 }
2668 else
2669 {
2670 process_stratum_target *proc_target
2671 = current_inferior ()->process_target ();
2672
2673 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2674 {
2675 tp->btrace.flags &= ~BTHR_MOVE;
2676 tp->btrace.flags |= BTHR_STOP;
2677 }
2678 }
2679 }
2680
2681 /* The can_execute_reverse method of target record-btrace. */
2682
2683 bool
2684 record_btrace_target::can_execute_reverse ()
2685 {
2686 return true;
2687 }
2688
2689 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2690
2691 bool
2692 record_btrace_target::stopped_by_sw_breakpoint ()
2693 {
2694 if (record_is_replaying (minus_one_ptid))
2695 {
2696 struct thread_info *tp = inferior_thread ();
2697
2698 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2699 }
2700
2701 return this->beneath ()->stopped_by_sw_breakpoint ();
2702 }
2703
2704 /* The supports_stopped_by_sw_breakpoint method of target
2705 record-btrace. */
2706
2707 bool
2708 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2709 {
2710 if (record_is_replaying (minus_one_ptid))
2711 return true;
2712
2713 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2714 }
2715
2716 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2717
2718 bool
2719 record_btrace_target::stopped_by_hw_breakpoint ()
2720 {
2721 if (record_is_replaying (minus_one_ptid))
2722 {
2723 struct thread_info *tp = inferior_thread ();
2724
2725 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2726 }
2727
2728 return this->beneath ()->stopped_by_hw_breakpoint ();
2729 }
2730
2731 /* The supports_stopped_by_hw_breakpoint method of target
2732 record-btrace. */
2733
2734 bool
2735 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2736 {
2737 if (record_is_replaying (minus_one_ptid))
2738 return true;
2739
2740 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2741 }
2742
2743 /* The update_thread_list method of target record-btrace. */
2744
2745 void
2746 record_btrace_target::update_thread_list ()
2747 {
2748 /* We don't add or remove threads during replay. */
2749 if (record_is_replaying (minus_one_ptid))
2750 return;
2751
2752 /* Forward the request. */
2753 this->beneath ()->update_thread_list ();
2754 }
2755
2756 /* The thread_alive method of target record-btrace. */
2757
2758 bool
2759 record_btrace_target::thread_alive (ptid_t ptid)
2760 {
2761 /* We don't add or remove threads during replay. */
2762 if (record_is_replaying (minus_one_ptid))
2763 return true;
2764
2765 /* Forward the request. */
2766 return this->beneath ()->thread_alive (ptid);
2767 }
2768
2769 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2770 is stopped. */
2771
2772 static void
2773 record_btrace_set_replay (struct thread_info *tp,
2774 const struct btrace_insn_iterator *it)
2775 {
2776 struct btrace_thread_info *btinfo;
2777
2778 btinfo = &tp->btrace;
2779
2780 if (it == NULL)
2781 record_btrace_stop_replaying (tp);
2782 else
2783 {
2784 if (btinfo->replay == NULL)
2785 record_btrace_start_replaying (tp);
2786 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2787 return;
2788
2789 *btinfo->replay = *it;
2790 registers_changed_thread (tp);
2791 }
2792
2793 /* Start anew from the new replay position. */
2794 record_btrace_clear_histories (btinfo);
2795
2796 inferior_thread ()->suspend.stop_pc
2797 = regcache_read_pc (get_current_regcache ());
2798 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2799 }
2800
2801 /* The goto_record_begin method of target record-btrace. */
2802
2803 void
2804 record_btrace_target::goto_record_begin ()
2805 {
2806 struct thread_info *tp;
2807 struct btrace_insn_iterator begin;
2808
2809 tp = require_btrace_thread ();
2810
2811 btrace_insn_begin (&begin, &tp->btrace);
2812
2813 /* Skip gaps at the beginning of the trace. */
2814 while (btrace_insn_get (&begin) == NULL)
2815 {
2816 unsigned int steps;
2817
2818 steps = btrace_insn_next (&begin, 1);
2819 if (steps == 0)
2820 error (_("No trace."));
2821 }
2822
2823 record_btrace_set_replay (tp, &begin);
2824 }
2825
2826 /* The goto_record_end method of target record-btrace. */
2827
2828 void
2829 record_btrace_target::goto_record_end ()
2830 {
2831 struct thread_info *tp;
2832
2833 tp = require_btrace_thread ();
2834
2835 record_btrace_set_replay (tp, NULL);
2836 }
2837
2838 /* The goto_record method of target record-btrace. */
2839
2840 void
2841 record_btrace_target::goto_record (ULONGEST insn)
2842 {
2843 struct thread_info *tp;
2844 struct btrace_insn_iterator it;
2845 unsigned int number;
2846 int found;
2847
2848 number = insn;
2849
2850 /* Check for wrap-arounds. */
2851 if (number != insn)
2852 error (_("Instruction number out of range."));
2853
2854 tp = require_btrace_thread ();
2855
2856 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2857
2858 /* Check if the instruction could not be found or is a gap. */
2859 if (found == 0 || btrace_insn_get (&it) == NULL)
2860 error (_("No such instruction."));
2861
2862 record_btrace_set_replay (tp, &it);
2863 }
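/* For example, the CLI command "record goto 3" ends up here with
   INSN == 3 and moves the replay position to the instruction that
   "record instruction-history" lists with number 3, provided that
   instruction exists and is not a gap.  */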
2864
2865 /* The record_stop_replaying method of target record-btrace. */
2866
2867 void
2868 record_btrace_target::record_stop_replaying ()
2869 {
2870 for (thread_info *tp : all_non_exited_threads ())
2871 record_btrace_stop_replaying (tp);
2872 }
2873
2874 /* The execution_direction target method. */
2875
2876 enum exec_direction_kind
2877 record_btrace_target::execution_direction ()
2878 {
2879 return record_btrace_resume_exec_dir;
2880 }
2881
2882 /* The prepare_to_generate_core target method. */
2883
2884 void
2885 record_btrace_target::prepare_to_generate_core ()
2886 {
2887 record_btrace_generating_corefile = 1;
2888 }
2889
2890 /* The done_generating_core target method. */
2891
2892 void
2893 record_btrace_target::done_generating_core ()
2894 {
2895 record_btrace_generating_corefile = 0;
2896 }
2897
2898 /* Start recording in BTS format. */
2899
2900 static void
2901 cmd_record_btrace_bts_start (const char *args, int from_tty)
2902 {
2903 if (args != NULL && *args != 0)
2904 error (_("Invalid argument."));
2905
2906 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2907
2908 try
2909 {
2910 execute_command ("target record-btrace", from_tty);
2911 }
2912 catch (const gdb_exception &exception)
2913 {
2914 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2915 throw;
2916 }
2917 }
2918
2919 /* Start recording in Intel Processor Trace format. */
2920
2921 static void
2922 cmd_record_btrace_pt_start (const char *args, int from_tty)
2923 {
2924 if (args != NULL && *args != 0)
2925 error (_("Invalid argument."));
2926
2927 record_btrace_conf.format = BTRACE_FORMAT_PT;
2928
2929 try
2930 {
2931 execute_command ("target record-btrace", from_tty);
2932 }
2933 catch (const gdb_exception &exception)
2934 {
2935 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2936 throw;
2937 }
2938 }
2939
2940 /* The "record btrace" command: start recording, trying the Intel Processor Trace format first and falling back to BTS. */
2941
2942 static void
2943 cmd_record_btrace_start (const char *args, int from_tty)
2944 {
2945 if (args != NULL && *args != 0)
2946 error (_("Invalid argument."));
2947
2948 record_btrace_conf.format = BTRACE_FORMAT_PT;
2949
2950 try
2951 {
2952 execute_command ("target record-btrace", from_tty);
2953 }
2954 catch (const gdb_exception &exception)
2955 {
2956 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2957
2958 try
2959 {
2960 execute_command ("target record-btrace", from_tty);
2961 }
2962 catch (const gdb_exception &ex)
2963 {
2964 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2965 throw;
2966 }
2967 }
2968 }
2969
2970 /* The "show record btrace replay-memory-access" command. */
2971
2972 static void
2973 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2974 struct cmd_list_element *c, const char *value)
2975 {
2976 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2977 replay_memory_access);
2978 }
2979
2980 /* The "set record btrace cpu none" command. */
2981
2982 static void
2983 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2984 {
2985 if (args != nullptr && *args != 0)
2986 error (_("Trailing junk: '%s'."), args);
2987
2988 record_btrace_cpu_state = CS_NONE;
2989 }
2990
2991 /* The "set record btrace cpu auto" command. */
2992
2993 static void
2994 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2995 {
2996 if (args != nullptr && *args != 0)
2997 error (_("Trailing junk: '%s'."), args);
2998
2999 record_btrace_cpu_state = CS_AUTO;
3000 }
3001
3002 /* The "set record btrace cpu" command. */
3003
3004 static void
3005 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3006 {
3007 if (args == nullptr)
3008 args = "";
3009
3010 /* We use a hard-coded vendor string for now. */
3011 unsigned int family, model, stepping;
3012 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3013 &model, &l1, &stepping, &l2);
3014 if (matches == 3)
3015 {
3016 if (strlen (args) != l2)
3017 error (_("Trailing junk: '%s'."), args + l2);
3018 }
3019 else if (matches == 2)
3020 {
3021 if (strlen (args) != l1)
3022 error (_("Trailing junk: '%s'."), args + l1);
3023
3024 stepping = 0;
3025 }
3026 else
3027 error (_("Bad format. See \"help set record btrace cpu\"."));
3028
3029 if (USHRT_MAX < family)
3030 error (_("Cpu family too big."));
3031
3032 if (UCHAR_MAX < model)
3033 error (_("Cpu model too big."));
3034
3035 if (UCHAR_MAX < stepping)
3036 error (_("Cpu stepping too big."));
3037
3038 record_btrace_cpu.vendor = CV_INTEL;
3039 record_btrace_cpu.family = family;
3040 record_btrace_cpu.model = model;
3041 record_btrace_cpu.stepping = stepping;
3042
3043 record_btrace_cpu_state = CS_CPU;
3044 }
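/* To illustrate the format parsed above: "set record btrace cpu
   intel: 6/158/9" selects family 6, model 158, stepping 9, while
   "set record btrace cpu intel: 6/158" leaves the stepping at 0.  The
   numbers are merely example inputs, not a recommendation.  */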
3045
3046 /* The "show record btrace cpu" command. */
3047
3048 static void
3049 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3050 {
3051 if (args != nullptr && *args != 0)
3052 error (_("Trailing junk: '%s'."), args);
3053
3054 switch (record_btrace_cpu_state)
3055 {
3056 case CS_AUTO:
3057 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3058 return;
3059
3060 case CS_NONE:
3061 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3062 return;
3063
3064 case CS_CPU:
3065 switch (record_btrace_cpu.vendor)
3066 {
3067 case CV_INTEL:
3068 if (record_btrace_cpu.stepping == 0)
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model);
3072 else
3073 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3074 record_btrace_cpu.family,
3075 record_btrace_cpu.model,
3076 record_btrace_cpu.stepping);
3077 return;
3078 }
3079 }
3080
3081 error (_("Internal error: bad cpu state."));
3082 }
3083
3084 /* The "record bts buffer-size" show value function. */
3085
3086 static void
3087 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3088 struct cmd_list_element *c,
3089 const char *value)
3090 {
3091 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3092 value);
3093 }
3094
3095 /* The "record pt buffer-size" show value function. */
3096
3097 static void
3098 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3099 struct cmd_list_element *c,
3100 const char *value)
3101 {
3102 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3103 value);
3104 }
3105
3106 /* Initialize btrace commands. */
3107
3108 void _initialize_record_btrace ();
3109 void
3110 _initialize_record_btrace ()
3111 {
3112 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3113 _("Start branch trace recording."), &record_btrace_cmdlist,
3114 "record btrace ", 0, &record_cmdlist);
3115 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3116
3117 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3118 _("\
3119 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3120 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3121 This format may not be available on all processors."),
3122 &record_btrace_cmdlist);
3123 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3124
3125 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3126 _("\
3127 Start branch trace recording in Intel Processor Trace format.\n\n\
3128 This format may not be available on all processors."),
3129 &record_btrace_cmdlist);
3130 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3131
3132 add_basic_prefix_cmd ("btrace", class_support,
3133 _("Set record options."), &set_record_btrace_cmdlist,
3134 "set record btrace ", 0, &set_record_cmdlist);
3135
3136 add_show_prefix_cmd ("btrace", class_support,
3137 _("Show record options."), &show_record_btrace_cmdlist,
3138 "show record btrace ", 0, &show_record_cmdlist);
3139
3140 add_setshow_enum_cmd ("replay-memory-access", no_class,
3141 replay_memory_access_types, &replay_memory_access, _("\
3142 Set what memory accesses are allowed during replay."), _("\
3143 Show what memory accesses are allowed during replay."),
3144 _("Default is READ-ONLY.\n\n\
3145 The btrace record target does not trace data.\n\
3146 The memory therefore corresponds to the live target and not \
3147 to the current replay position.\n\n\
3148 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3149 When READ-WRITE, allow accesses to read-only and read-write memory during \
3150 replay."),
3151 NULL, cmd_show_replay_memory_access,
3152 &set_record_btrace_cmdlist,
3153 &show_record_btrace_cmdlist);
3154
3155 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3156 _("\
3157 Set the cpu to be used for trace decode.\n\n\
3158 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3159 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3160 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3161 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3162 When GDB does not support that cpu, this option can be used to enable\n\
3163 workarounds for a similar cpu that GDB supports.\n\n\
3164 When set to \"none\", errata workarounds are disabled."),
3165 &set_record_btrace_cpu_cmdlist,
3166 "set record btrace cpu ", 1,
3167 &set_record_btrace_cmdlist);
3168
3169 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3170 Automatically determine the cpu to be used for trace decode."),
3171 &set_record_btrace_cpu_cmdlist);
3172
3173 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3174 Do not enable errata workarounds for trace decode."),
3175 &set_record_btrace_cpu_cmdlist);
3176
3177 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3178 Show the cpu to be used for trace decode."),
3179 &show_record_btrace_cmdlist);
3180
3181 add_basic_prefix_cmd ("bts", class_support,
3182 _("Set record btrace bts options."),
3183 &set_record_btrace_bts_cmdlist,
3184 "set record btrace bts ", 0,
3185 &set_record_btrace_cmdlist);
3186
3187 add_show_prefix_cmd ("bts", class_support,
3188 _("Show record btrace bts options."),
3189 &show_record_btrace_bts_cmdlist,
3190 "show record btrace bts ", 0,
3191 &show_record_btrace_cmdlist);
3192
3193 add_setshow_uinteger_cmd ("buffer-size", no_class,
3194 &record_btrace_conf.bts.size,
3195 _("Set the record/replay bts buffer size."),
3196 _("Show the record/replay bts buffer size."), _("\
3197 When starting recording, request a trace buffer of this size. \
3198 The actual buffer size may differ from the requested size. \
3199 Use \"info record\" to see the actual buffer size.\n\n\
3200 Bigger buffers allow longer recording but also take more time to process \
3201 the recorded execution trace.\n\n\
3202 The trace buffer size may not be changed while recording."), NULL,
3203 show_record_bts_buffer_size_value,
3204 &set_record_btrace_bts_cmdlist,
3205 &show_record_btrace_bts_cmdlist);
3206
3207 add_basic_prefix_cmd ("pt", class_support,
3208 _("Set record btrace pt options."),
3209 &set_record_btrace_pt_cmdlist,
3210 "set record btrace pt ", 0,
3211 &set_record_btrace_cmdlist);
3212
3213 add_show_prefix_cmd ("pt", class_support,
3214 _("Show record btrace pt options."),
3215 &show_record_btrace_pt_cmdlist,
3216 "show record btrace pt ", 0,
3217 &show_record_btrace_cmdlist);
3218
3219 add_setshow_uinteger_cmd ("buffer-size", no_class,
3220 &record_btrace_conf.pt.size,
3221 _("Set the record/replay pt buffer size."),
3222 _("Show the record/replay pt buffer size."), _("\
3223 Bigger buffers allow longer recording but also take more time to process \
3224 the recorded execution.\n\
3225 The actual buffer size may differ from the requested size. Use \"info record\" \
3226 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3227 &set_record_btrace_pt_cmdlist,
3228 &show_record_btrace_pt_cmdlist);
3229
3230 add_target (record_btrace_target_info, record_btrace_target_open);
3231
3232 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3233 xcalloc, xfree);
3234
3235 record_btrace_conf.bts.size = 64 * 1024;
3236 record_btrace_conf.pt.size = 16 * 1024;
3237 }