1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "gdbsupport/event-loop.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
47 static const target_info record_btrace_target_info
= {
49 N_("Branch tracing target"),
50 N_("Collect control-flow trace and provide the execution history.")
53 /* The target_ops of record-btrace. */
55 class record_btrace_target final
: public target_ops
58 const target_info
&info () const override
59 { return record_btrace_target_info
; }
61 strata
stratum () const override
{ return record_stratum
; }
63 void close () override
;
64 void async (int) override
;
66 void detach (inferior
*inf
, int from_tty
) override
67 { record_detach (this, inf
, from_tty
); }
69 void disconnect (const char *, int) override
;
71 void mourn_inferior () override
72 { record_mourn_inferior (this); }
75 { record_kill (this); }
77 enum record_method
record_method (ptid_t ptid
) override
;
79 void stop_recording () override
;
80 void info_record () override
;
82 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
83 void insn_history_from (ULONGEST from
, int size
,
84 gdb_disassembly_flags flags
) override
;
85 void insn_history_range (ULONGEST begin
, ULONGEST end
,
86 gdb_disassembly_flags flags
) override
;
87 void call_history (int size
, record_print_flags flags
) override
;
88 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
90 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
93 bool record_is_replaying (ptid_t ptid
) override
;
94 bool record_will_replay (ptid_t ptid
, int dir
) override
;
95 void record_stop_replaying () override
;
97 enum target_xfer_status
xfer_partial (enum target_object object
,
100 const gdb_byte
*writebuf
,
101 ULONGEST offset
, ULONGEST len
,
102 ULONGEST
*xfered_len
) override
;
104 int insert_breakpoint (struct gdbarch
*,
105 struct bp_target_info
*) override
;
106 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
107 enum remove_bp_reason
) override
;
109 void fetch_registers (struct regcache
*, int) override
;
111 void store_registers (struct regcache
*, int) override
;
112 void prepare_to_store (struct regcache
*) override
;
114 const struct frame_unwind
*get_unwinder () override
;
116 const struct frame_unwind
*get_tailcall_unwinder () override
;
118 void commit_resume () override
;
119 void resume (ptid_t
, int, enum gdb_signal
) override
;
120 ptid_t
wait (ptid_t
, struct target_waitstatus
*, int) override
;
122 void stop (ptid_t
) override
;
123 void update_thread_list () override
;
124 bool thread_alive (ptid_t ptid
) override
;
125 void goto_record_begin () override
;
126 void goto_record_end () override
;
127 void goto_record (ULONGEST insn
) override
;
129 bool can_execute_reverse () override
;
131 bool stopped_by_sw_breakpoint () override
;
132 bool supports_stopped_by_sw_breakpoint () override
;
134 bool stopped_by_hw_breakpoint () override
;
135 bool supports_stopped_by_hw_breakpoint () override
;
137 enum exec_direction_kind
execution_direction () override
;
138 void prepare_to_generate_core () override
;
139 void done_generating_core () override
;
142 static record_btrace_target record_btrace_ops
;
144 /* Initialize the record-btrace target ops. */
146 /* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
148 static const gdb::observers::token record_btrace_thread_observer_token
{};
150 /* Memory access types used in set/show record btrace replay-memory-access. */
151 static const char replay_memory_access_read_only
[] = "read-only";
152 static const char replay_memory_access_read_write
[] = "read-write";
/* The list of accepted values for "set record btrace replay-memory-access".
   NOTE(review): the array's braces and trailing NULL terminator are not
   visible in this chunk -- confirm against the full file.  */
153 static const char *const replay_memory_access_types
[] =
155 replay_memory_access_read_only
,
156 replay_memory_access_read_write
,
160 /* The currently allowed replay memory access type. */
/* Defaults to read-only; insert_breakpoint temporarily switches it to
   read-write around the breakpoint write and restores it afterwards.  */
161 static const char *replay_memory_access
= replay_memory_access_read_only
;
163 /* The cpu state kinds. */
164 enum record_btrace_cpu_state_kind
171 /* The current cpu state. */
172 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
174 /* The current cpu for trace decode. */
/* Only meaningful when RECORD_BTRACE_CPU_STATE is not CS_AUTO;
   record_btrace_get_cpu returns NULL for the auto state.  */
175 static struct btrace_cpu record_btrace_cpu
;
177 /* Command lists for "set/show record btrace". */
178 static struct cmd_list_element
*set_record_btrace_cmdlist
;
179 static struct cmd_list_element
*show_record_btrace_cmdlist
;
181 /* The execution direction of the last resume we got. See record-full.c. */
182 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
184 /* The async event handler for reverse/replay execution. */
/* Created in record_btrace_push_target and deleted again in
   record_btrace_target::close.  */
185 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
187 /* A flag indicating that we are currently generating a core file. */
/* While non-zero, xfer_partial bypasses the replay memory-access
   restriction so core generation can read freely.  */
188 static int record_btrace_generating_corefile
;
190 /* The current branch trace configuration. */
/* Passed to btrace_enable for every thread we start tracing.  */
191 static struct btrace_config record_btrace_conf
;
193 /* Command list for "record btrace". */
194 static struct cmd_list_element
*record_btrace_cmdlist
;
196 /* Command lists for "set/show record btrace bts". */
197 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
198 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
200 /* Command lists for "set/show record btrace pt". */
201 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
202 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
204 /* Command list for "set record btrace cpu". */
205 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
207 /* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
210 #define DEBUG(msg, args...) \
213 if (record_debug != 0) \
214 fprintf_unfiltered (gdb_stdlog, \
215 "[record-btrace] " msg "\n", ##args); \
220 /* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222 const struct btrace_cpu
*
223 record_btrace_get_cpu (void)
225 switch (record_btrace_cpu_state
)
231 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
234 return &record_btrace_cpu
;
237 error (_("Internal error: bad record btrace cpu state."));
240 /* Update the branch trace for the current thread and return a pointer to its
243 Throws an error if there is no thread or no trace. This function never
246 static struct thread_info
*
247 require_btrace_thread (void)
251 if (inferior_ptid
== null_ptid
)
252 error (_("No thread."));
254 thread_info
*tp
= inferior_thread ();
256 validate_registers_access ();
258 btrace_fetch (tp
, record_btrace_get_cpu ());
260 if (btrace_is_empty (tp
))
261 error (_("No trace."));
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
269 Throws an error if there is no thread or no trace. This function never
272 static struct btrace_thread_info
*
273 require_btrace (void)
275 struct thread_info
*tp
;
277 tp
= require_btrace_thread ();
282 /* Enable branch tracing for one thread. Warn on errors. */
285 record_btrace_enable_warn (struct thread_info
*tp
)
289 btrace_enable (tp
, &record_btrace_conf
);
291 catch (const gdb_exception_error
&error
)
293 warning ("%s", error
.what ());
297 /* Enable automatic tracing of new threads. */
/* Attaches record_btrace_enable_warn as a new_thread observer so that
   branch tracing is enabled (warning on failure rather than throwing)
   for every thread created from now on.  The attachment is keyed by
   RECORD_BTRACE_THREAD_OBSERVER_TOKEN so it can be detached again.  */
300 record_btrace_auto_enable (void)
302 DEBUG ("attach thread observer");
304 gdb::observers::new_thread
.attach (record_btrace_enable_warn
,
305 record_btrace_thread_observer_token
);
308 /* Disable automatic tracing of new threads. */
/* Detaches the observer registered by record_btrace_auto_enable, using
   the same token.  */
311 record_btrace_auto_disable (void)
313 DEBUG ("detach thread observer");
315 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
318 /* The record-btrace async event handler function. */
321 record_btrace_handle_async_inferior_event (gdb_client_data data
)
323 inferior_event_handler (INF_REG_EVENT
, NULL
);
326 /* See record-btrace.h. */
329 record_btrace_push_target (void)
333 record_btrace_auto_enable ();
335 push_target (&record_btrace_ops
);
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
340 record_btrace_generating_corefile
= 0;
342 format
= btrace_format_short_string (record_btrace_conf
.format
);
343 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
346 /* Disable btrace on a set of threads on scope exit. */
348 struct scoped_btrace_disable
350 scoped_btrace_disable () = default;
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
354 ~scoped_btrace_disable ()
356 for (thread_info
*tp
: m_threads
)
360 void add_thread (thread_info
*thread
)
362 m_threads
.push_front (thread
);
371 std::forward_list
<thread_info
*> m_threads
;
374 /* Open target record-btrace. */
377 record_btrace_target_open (const char *args
, int from_tty
)
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable
;
387 if (!target_has_execution
)
388 error (_("The program is not being run."));
390 for (thread_info
*tp
: all_non_exited_threads ())
391 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
393 btrace_enable (tp
, &record_btrace_conf
);
395 btrace_disable
.add_thread (tp
);
398 record_btrace_push_target ();
400 btrace_disable
.discard ();
403 /* The stop_recording method of target record-btrace. */
406 record_btrace_target::stop_recording ()
408 DEBUG ("stop recording");
410 record_btrace_auto_disable ();
412 for (thread_info
*tp
: all_non_exited_threads ())
413 if (tp
->btrace
.target
!= NULL
)
417 /* The disconnect method of target record-btrace. */
/* Unlike close/stop_recording, this deliberately leaves recording
   running on the target: we only pop the record layer on the GDB side
   and then delegate the disconnect to the target beneath.  The beneath
   pointer is cached before unpush_target, since this target is no
   longer on the stack afterwards.  */
420 record_btrace_target::disconnect (const char *args
,
423 struct target_ops
*beneath
= this->beneath ();
425 /* Do not stop recording, just clean up GDB side. */
426 unpush_target (this);
428 /* Forward disconnect. */
429 beneath
->disconnect (args
, from_tty
);
432 /* The close method of target record-btrace. */
435 record_btrace_target::close ()
437 if (record_btrace_async_inferior_event_handler
!= NULL
)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
446 for (thread_info
*tp
: all_non_exited_threads ())
447 btrace_teardown (tp
);
450 /* The async method of target record-btrace. */
453 record_btrace_target::async (int enable
)
456 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
458 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
460 this->beneath ()->async (enable
);
463 /* Adjusts the size and returns a human readable size suffix. */
466 record_btrace_adjust_size (unsigned int *size
)
472 if ((sz
& ((1u << 30) - 1)) == 0)
477 else if ((sz
& ((1u << 20) - 1)) == 0)
482 else if ((sz
& ((1u << 10) - 1)) == 0)
491 /* Print a BTS configuration. */
494 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
502 suffix
= record_btrace_adjust_size (&size
);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
507 /* Print an Intel Processor Trace configuration. */
510 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
518 suffix
= record_btrace_adjust_size (&size
);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
523 /* Print a branch tracing configuration. */
526 record_btrace_print_conf (const struct btrace_config
*conf
)
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf
->format
));
531 switch (conf
->format
)
533 case BTRACE_FORMAT_NONE
:
536 case BTRACE_FORMAT_BTS
:
537 record_btrace_print_bts_conf (&conf
->bts
);
540 case BTRACE_FORMAT_PT
:
541 record_btrace_print_pt_conf (&conf
->pt
);
545 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format."));
548 /* The info_record method of target record-btrace. */
551 record_btrace_target::info_record ()
553 struct btrace_thread_info
*btinfo
;
554 const struct btrace_config
*conf
;
555 struct thread_info
*tp
;
556 unsigned int insns
, calls
, gaps
;
560 if (inferior_ptid
== null_ptid
)
561 error (_("No thread."));
563 tp
= inferior_thread ();
565 validate_registers_access ();
567 btinfo
= &tp
->btrace
;
569 conf
= ::btrace_conf (btinfo
);
571 record_btrace_print_conf (conf
);
573 btrace_fetch (tp
, record_btrace_get_cpu ());
579 if (!btrace_is_empty (tp
))
581 struct btrace_call_iterator call
;
582 struct btrace_insn_iterator insn
;
584 btrace_call_end (&call
, btinfo
);
585 btrace_call_prev (&call
, 1);
586 calls
= btrace_call_number (&call
);
588 btrace_insn_end (&insn
, btinfo
);
589 insns
= btrace_insn_number (&insn
);
591 /* If the last instruction is not a gap, it is the current instruction
592 that is not actually part of the record. */
593 if (btrace_insn_get (&insn
) != NULL
)
596 gaps
= btinfo
->ngaps
;
599 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
600 "for thread %s (%s).\n"), insns
, calls
, gaps
,
601 print_thread_id (tp
),
602 target_pid_to_str (tp
->ptid
).c_str ());
604 if (btrace_is_replaying (tp
))
605 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
606 btrace_insn_number (btinfo
->replay
));
609 /* Print a decode error. */
612 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
613 enum btrace_format format
)
615 const char *errstr
= btrace_decode_error (format
, errcode
);
617 uiout
->text (_("["));
618 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
619 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
621 uiout
->text (_("decode error ("));
622 uiout
->field_signed ("errcode", errcode
);
623 uiout
->text (_("): "));
625 uiout
->text (errstr
);
626 uiout
->text (_("]\n"));
629 /* A range of source lines. */
631 struct btrace_line_range
633 /* The symtab this line is from. */
634 struct symtab
*symtab
;
636 /* The first line (inclusive). */
639 /* The last line (exclusive). */
643 /* Construct a line range. */
645 static struct btrace_line_range
646 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
648 struct btrace_line_range range
;
650 range
.symtab
= symtab
;
657 /* Add a line to a line range. */
659 static struct btrace_line_range
660 btrace_line_range_add (struct btrace_line_range range
, int line
)
662 if (range
.end
<= range
.begin
)
664 /* This is the first entry. */
666 range
.end
= line
+ 1;
668 else if (line
< range
.begin
)
670 else if (range
.end
< line
)
676 /* Return non-zero if RANGE is empty, zero otherwise. */
679 btrace_line_range_is_empty (struct btrace_line_range range
)
681 return range
.end
<= range
.begin
;
684 /* Return non-zero if LHS contains RHS, zero otherwise. */
687 btrace_line_range_contains_range (struct btrace_line_range lhs
,
688 struct btrace_line_range rhs
)
690 return ((lhs
.symtab
== rhs
.symtab
)
691 && (lhs
.begin
<= rhs
.begin
)
692 && (rhs
.end
<= lhs
.end
));
695 /* Find the line range associated with PC. */
697 static struct btrace_line_range
698 btrace_find_line_range (CORE_ADDR pc
)
700 struct btrace_line_range range
;
701 struct linetable_entry
*lines
;
702 struct linetable
*ltable
;
703 struct symtab
*symtab
;
706 symtab
= find_pc_line_symtab (pc
);
708 return btrace_mk_line_range (NULL
, 0, 0);
710 ltable
= SYMTAB_LINETABLE (symtab
);
712 return btrace_mk_line_range (symtab
, 0, 0);
714 nlines
= ltable
->nitems
;
715 lines
= ltable
->item
;
717 return btrace_mk_line_range (symtab
, 0, 0);
719 range
= btrace_mk_line_range (symtab
, 0, 0);
720 for (i
= 0; i
< nlines
- 1; i
++)
722 /* The test of is_stmt here was added when the is_stmt field was
723 introduced to the 'struct linetable_entry' structure. This
724 ensured that this loop maintained the same behaviour as before we
725 introduced is_stmt. That said, it might be that we would be
726 better off not checking is_stmt here, this would lead to us
727 possibly adding more line numbers to the range. At the time this
728 change was made I was unsure how to test this so chose to go with
729 maintaining the existing experience. */
730 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0)
731 && (lines
[i
].is_stmt
== 1))
732 range
= btrace_line_range_add (range
, lines
[i
].line
);
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the ui-out emitters for the last
   printed source line and the instructions corresponding to that source
   line.  When printing a new source line, we reset the open emitters and
   open new ones for the new source line.  If the source line range in
   LINES is not empty, this function leaves the emitters for the last
   printed source line open so instructions can be added to them. */
748 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
749 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
750 gdb::optional
<ui_out_emit_list
> *asm_list
,
751 gdb_disassembly_flags flags
)
753 print_source_lines_flags psl_flags
;
755 if (flags
& DISASSEMBLY_FILENAME
)
756 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
758 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
762 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
764 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
766 asm_list
->emplace (uiout
, "line_asm_insn");
770 /* Disassemble a section of the recorded instruction trace. */
773 btrace_insn_history (struct ui_out
*uiout
,
774 const struct btrace_thread_info
*btinfo
,
775 const struct btrace_insn_iterator
*begin
,
776 const struct btrace_insn_iterator
*end
,
777 gdb_disassembly_flags flags
)
779 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
780 btrace_insn_number (begin
), btrace_insn_number (end
));
782 flags
|= DISASSEMBLY_SPECULATIVE
;
784 struct gdbarch
*gdbarch
= target_gdbarch ();
785 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
787 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
789 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
790 gdb::optional
<ui_out_emit_list
> asm_list
;
792 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
794 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
795 btrace_insn_next (&it
, 1))
797 const struct btrace_insn
*insn
;
799 insn
= btrace_insn_get (&it
);
801 /* A NULL instruction indicates a gap in the trace. */
804 const struct btrace_config
*conf
;
806 conf
= btrace_conf (btinfo
);
808 /* We have trace so we must have a configuration. */
809 gdb_assert (conf
!= NULL
);
811 uiout
->field_fmt ("insn-number", "%u",
812 btrace_insn_number (&it
));
815 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
820 struct disasm_insn dinsn
;
822 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
824 struct btrace_line_range lines
;
826 lines
= btrace_find_line_range (insn
->pc
);
827 if (!btrace_line_range_is_empty (lines
)
828 && !btrace_line_range_contains_range (last_lines
, lines
))
830 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
834 else if (!src_and_asm_tuple
.has_value ())
836 gdb_assert (!asm_list
.has_value ());
838 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
840 /* No source information. */
841 asm_list
.emplace (uiout
, "line_asm_insn");
844 gdb_assert (src_and_asm_tuple
.has_value ());
845 gdb_assert (asm_list
.has_value ());
848 memset (&dinsn
, 0, sizeof (dinsn
));
849 dinsn
.number
= btrace_insn_number (&it
);
850 dinsn
.addr
= insn
->pc
;
852 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
853 dinsn
.is_speculative
= 1;
855 disasm
.pretty_print_insn (&dinsn
, flags
);
860 /* The insn_history method of target record-btrace. */
863 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
865 struct btrace_thread_info
*btinfo
;
866 struct btrace_insn_history
*history
;
867 struct btrace_insn_iterator begin
, end
;
868 struct ui_out
*uiout
;
869 unsigned int context
, covered
;
871 uiout
= current_uiout
;
872 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
873 context
= abs (size
);
875 error (_("Bad record instruction-history-size."));
877 btinfo
= require_btrace ();
878 history
= btinfo
->insn_history
;
881 struct btrace_insn_iterator
*replay
;
883 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
885 /* If we're replaying, we start at the replay position. Otherwise, we
886 start at the tail of the trace. */
887 replay
= btinfo
->replay
;
891 btrace_insn_end (&begin
, btinfo
);
893 /* We start from here and expand in the requested direction. Then we
894 expand in the other direction, as well, to fill up any remaining
899 /* We want the current position covered, as well. */
900 covered
= btrace_insn_next (&end
, 1);
901 covered
+= btrace_insn_prev (&begin
, context
- covered
);
902 covered
+= btrace_insn_next (&end
, context
- covered
);
906 covered
= btrace_insn_next (&end
, context
);
907 covered
+= btrace_insn_prev (&begin
, context
- covered
);
912 begin
= history
->begin
;
915 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
916 btrace_insn_number (&begin
), btrace_insn_number (&end
));
921 covered
= btrace_insn_prev (&begin
, context
);
926 covered
= btrace_insn_next (&end
, context
);
931 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
935 printf_unfiltered (_("At the start of the branch trace record.\n"));
937 printf_unfiltered (_("At the end of the branch trace record.\n"));
940 btrace_set_insn_history (btinfo
, &begin
, &end
);
943 /* The insn_history_range method of target record-btrace. */
946 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
947 gdb_disassembly_flags flags
)
949 struct btrace_thread_info
*btinfo
;
950 struct btrace_insn_iterator begin
, end
;
951 struct ui_out
*uiout
;
952 unsigned int low
, high
;
955 uiout
= current_uiout
;
956 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
960 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
962 /* Check for wrap-arounds. */
963 if (low
!= from
|| high
!= to
)
964 error (_("Bad range."));
967 error (_("Bad range."));
969 btinfo
= require_btrace ();
971 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
973 error (_("Range out of bounds."));
975 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
978 /* Silently truncate the range. */
979 btrace_insn_end (&end
, btinfo
);
983 /* We want both begin and end to be inclusive. */
984 btrace_insn_next (&end
, 1);
987 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
988 btrace_set_insn_history (btinfo
, &begin
, &end
);
991 /* The insn_history_from method of target record-btrace. */
994 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
995 gdb_disassembly_flags flags
)
997 ULONGEST begin
, end
, context
;
999 context
= abs (size
);
1001 error (_("Bad record instruction-history-size."));
1010 begin
= from
- context
+ 1;
1015 end
= from
+ context
- 1;
1017 /* Check for wrap-around. */
1022 insn_history_range (begin
, end
, flags
);
1025 /* Print the instruction number range for a function call history line. */
/* Emits the inclusive instruction-number range
   [BFUN->insn_offset, BFUN->insn_offset + size - 1] as the "insn begin"
   and "insn end" fields on UIOUT.  Asserts that BFUN contains at least
   one instruction.  */
1028 btrace_call_history_insn_range (struct ui_out
*uiout
,
1029 const struct btrace_function
*bfun
)
1031 unsigned int begin
, end
, size
;
1033 size
= bfun
->insn
.size ();
1034 gdb_assert (size
> 0);
1036 begin
= bfun
->insn_offset
;
/* The range is inclusive on both ends, hence the "- 1".  */
1037 end
= begin
+ size
- 1;
1039 uiout
->field_unsigned ("insn begin", begin
);
1041 uiout
->field_unsigned ("insn end", end
);
1044 /* Compute the lowest and highest source line for the instructions in BFUN
1045 and return them in PBEGIN and PEND.
1046 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1047 result from inlining or macro expansion. */
1050 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1051 int *pbegin
, int *pend
)
1053 struct symtab
*symtab
;
1064 symtab
= symbol_symtab (sym
);
1066 for (const btrace_insn
&insn
: bfun
->insn
)
1068 struct symtab_and_line sal
;
1070 sal
= find_pc_line (insn
.pc
, 0);
1071 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1074 begin
= std::min (begin
, sal
.line
);
1075 end
= std::max (end
, sal
.line
);
1083 /* Print the source line information for a function call history line. */
1086 btrace_call_history_src_line (struct ui_out
*uiout
,
1087 const struct btrace_function
*bfun
)
1096 uiout
->field_string ("file",
1097 symtab_to_filename_for_display (symbol_symtab (sym
)),
1098 file_name_style
.style ());
1100 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1105 uiout
->field_signed ("min line", begin
);
1111 uiout
->field_signed ("max line", end
);
1114 /* Get the name of a branch trace function. */
1117 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1119 struct minimal_symbol
*msym
;
1129 return sym
->print_name ();
1130 else if (msym
!= NULL
)
1131 return msym
->print_name ();
1136 /* Disassemble a section of the recorded function trace. */
1139 btrace_call_history (struct ui_out
*uiout
,
1140 const struct btrace_thread_info
*btinfo
,
1141 const struct btrace_call_iterator
*begin
,
1142 const struct btrace_call_iterator
*end
,
1145 struct btrace_call_iterator it
;
1146 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1148 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1149 btrace_call_number (end
));
1151 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1153 const struct btrace_function
*bfun
;
1154 struct minimal_symbol
*msym
;
1157 bfun
= btrace_call_get (&it
);
1161 /* Print the function index. */
1162 uiout
->field_unsigned ("index", bfun
->number
);
1165 /* Indicate gaps in the trace. */
1166 if (bfun
->errcode
!= 0)
1168 const struct btrace_config
*conf
;
1170 conf
= btrace_conf (btinfo
);
1172 /* We have trace so we must have a configuration. */
1173 gdb_assert (conf
!= NULL
);
1175 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1180 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1182 int level
= bfun
->level
+ btinfo
->level
, i
;
1184 for (i
= 0; i
< level
; ++i
)
1189 uiout
->field_string ("function", sym
->print_name (),
1190 function_name_style
.style ());
1191 else if (msym
!= NULL
)
1192 uiout
->field_string ("function", msym
->print_name (),
1193 function_name_style
.style ());
1194 else if (!uiout
->is_mi_like_p ())
1195 uiout
->field_string ("function", "??",
1196 function_name_style
.style ());
1198 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1200 uiout
->text (_("\tinst "));
1201 btrace_call_history_insn_range (uiout
, bfun
);
1204 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1206 uiout
->text (_("\tat "));
1207 btrace_call_history_src_line (uiout
, bfun
);
1214 /* The call_history method of target record-btrace. */
1217 record_btrace_target::call_history (int size
, record_print_flags flags
)
1219 struct btrace_thread_info
*btinfo
;
1220 struct btrace_call_history
*history
;
1221 struct btrace_call_iterator begin
, end
;
1222 struct ui_out
*uiout
;
1223 unsigned int context
, covered
;
1225 uiout
= current_uiout
;
1226 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1227 context
= abs (size
);
1229 error (_("Bad record function-call-history-size."));
1231 btinfo
= require_btrace ();
1232 history
= btinfo
->call_history
;
1233 if (history
== NULL
)
1235 struct btrace_insn_iterator
*replay
;
1237 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1239 /* If we're replaying, we start at the replay position. Otherwise, we
1240 start at the tail of the trace. */
1241 replay
= btinfo
->replay
;
1244 begin
.btinfo
= btinfo
;
1245 begin
.index
= replay
->call_index
;
1248 btrace_call_end (&begin
, btinfo
);
1250 /* We start from here and expand in the requested direction. Then we
1251 expand in the other direction, as well, to fill up any remaining
1256 /* We want the current position covered, as well. */
1257 covered
= btrace_call_next (&end
, 1);
1258 covered
+= btrace_call_prev (&begin
, context
- covered
);
1259 covered
+= btrace_call_next (&end
, context
- covered
);
1263 covered
= btrace_call_next (&end
, context
);
1264 covered
+= btrace_call_prev (&begin
, context
- covered
);
1269 begin
= history
->begin
;
1272 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1273 btrace_call_number (&begin
), btrace_call_number (&end
));
1278 covered
= btrace_call_prev (&begin
, context
);
1283 covered
= btrace_call_next (&end
, context
);
1288 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1292 printf_unfiltered (_("At the start of the branch trace record.\n"));
1294 printf_unfiltered (_("At the end of the branch trace record.\n"));
1297 btrace_set_call_history (btinfo
, &begin
, &end
);
1300 /* The call_history_range method of target record-btrace. */
1303 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1304 record_print_flags flags
)
1306 struct btrace_thread_info
*btinfo
;
1307 struct btrace_call_iterator begin
, end
;
1308 struct ui_out
*uiout
;
1309 unsigned int low
, high
;
1312 uiout
= current_uiout
;
1313 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1317 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1319 /* Check for wrap-arounds. */
1320 if (low
!= from
|| high
!= to
)
1321 error (_("Bad range."));
1324 error (_("Bad range."));
1326 btinfo
= require_btrace ();
1328 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1330 error (_("Range out of bounds."));
1332 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1335 /* Silently truncate the range. */
1336 btrace_call_end (&end
, btinfo
);
1340 /* We want both begin and end to be inclusive. */
1341 btrace_call_next (&end
, 1);
1344 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1345 btrace_set_call_history (btinfo
, &begin
, &end
);
1348 /* The call_history_from method of target record-btrace. */
1351 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1352 record_print_flags flags
)
1354 ULONGEST begin
, end
, context
;
1356 context
= abs (size
);
1358 error (_("Bad record function-call-history-size."));
1367 begin
= from
- context
+ 1;
1372 end
= from
+ context
- 1;
1374 /* Check for wrap-around. */
1379 call_history_range ( begin
, end
, flags
);
1382 /* The record_method method of target record-btrace. */
1385 record_btrace_target::record_method (ptid_t ptid
)
1387 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1388 thread_info
*const tp
= find_thread_ptid (proc_target
, ptid
);
1391 error (_("No thread."));
1393 if (tp
->btrace
.target
== NULL
)
1394 return RECORD_METHOD_NONE
;
1396 return RECORD_METHOD_BTRACE
;
1399 /* The record_is_replaying method of target record-btrace. */
1402 record_btrace_target::record_is_replaying (ptid_t ptid
)
1404 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1405 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
1406 if (btrace_is_replaying (tp
))
1412 /* The record_will_replay method of target record-btrace. */
1415 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1417 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1420 /* The xfer_partial method of target record-btrace. */
1422 enum target_xfer_status
1423 record_btrace_target::xfer_partial (enum target_object object
,
1424 const char *annex
, gdb_byte
*readbuf
,
1425 const gdb_byte
*writebuf
, ULONGEST offset
,
1426 ULONGEST len
, ULONGEST
*xfered_len
)
1428 /* Filter out requests that don't make sense during replay. */
1429 if (replay_memory_access
== replay_memory_access_read_only
1430 && !record_btrace_generating_corefile
1431 && record_is_replaying (inferior_ptid
))
1435 case TARGET_OBJECT_MEMORY
:
1437 struct target_section
*section
;
1439 /* We do not allow writing memory in general. */
1440 if (writebuf
!= NULL
)
1443 return TARGET_XFER_UNAVAILABLE
;
1446 /* We allow reading readonly memory. */
1447 section
= target_section_by_addr (this, offset
);
1448 if (section
!= NULL
)
1450 /* Check if the section we found is readonly. */
1451 if ((bfd_section_flags (section
->the_bfd_section
)
1452 & SEC_READONLY
) != 0)
1454 /* Truncate the request to fit into this section. */
1455 len
= std::min (len
, section
->endaddr
- offset
);
1461 return TARGET_XFER_UNAVAILABLE
;
1466 /* Forward the request. */
1467 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1468 offset
, len
, xfered_len
);
1471 /* The insert_breakpoint method of target record-btrace. */
1474 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1475 struct bp_target_info
*bp_tgt
)
1480 /* Inserting breakpoints requires accessing memory. Allow it for the
1481 duration of this function. */
1482 old
= replay_memory_access
;
1483 replay_memory_access
= replay_memory_access_read_write
;
1488 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1490 catch (const gdb_exception
&except
)
1492 replay_memory_access
= old
;
1495 replay_memory_access
= old
;
1500 /* The remove_breakpoint method of target record-btrace. */
1503 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1504 struct bp_target_info
*bp_tgt
,
1505 enum remove_bp_reason reason
)
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
1512 old
= replay_memory_access
;
1513 replay_memory_access
= replay_memory_access_read_write
;
1518 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1520 catch (const gdb_exception
&except
)
1522 replay_memory_access
= old
;
1525 replay_memory_access
= old
;
1530 /* The fetch_registers method of target record-btrace. */
1533 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1535 btrace_insn_iterator
*replay
= nullptr;
1537 /* Thread-db may ask for a thread's registers before GDB knows about the
1538 thread. We forward the request to the target beneath in this
1540 thread_info
*tp
= find_thread_ptid (regcache
->target (), regcache
->ptid ());
1542 replay
= tp
->btrace
.replay
;
1544 if (replay
!= nullptr && !record_btrace_generating_corefile
)
1546 const struct btrace_insn
*insn
;
1547 struct gdbarch
*gdbarch
;
1550 gdbarch
= regcache
->arch ();
1551 pcreg
= gdbarch_pc_regnum (gdbarch
);
1555 /* We can only provide the PC register. */
1556 if (regno
>= 0 && regno
!= pcreg
)
1559 insn
= btrace_insn_get (replay
);
1560 gdb_assert (insn
!= NULL
);
1562 regcache
->raw_supply (regno
, &insn
->pc
);
1565 this->beneath ()->fetch_registers (regcache
, regno
);
1568 /* The store_registers method of target record-btrace. */
1571 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1573 if (!record_btrace_generating_corefile
1574 && record_is_replaying (regcache
->ptid ()))
1575 error (_("Cannot write registers while replaying."));
1577 gdb_assert (may_write_registers
);
1579 this->beneath ()->store_registers (regcache
, regno
);
1582 /* The prepare_to_store method of target record-btrace. */
1585 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1587 if (!record_btrace_generating_corefile
1588 && record_is_replaying (regcache
->ptid ()))
1591 this->beneath ()->prepare_to_store (regcache
);
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1608 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1610 static htab_t bfcache
;
1612 /* hash_f for htab_create_alloc of bfcache. */
1615 bfcache_hash (const void *arg
)
1617 const struct btrace_frame_cache
*cache
1618 = (const struct btrace_frame_cache
*) arg
;
1620 return htab_hash_pointer (cache
->frame
);
1623 /* eq_f for htab_create_alloc of bfcache. */
1626 bfcache_eq (const void *arg1
, const void *arg2
)
1628 const struct btrace_frame_cache
*cache1
1629 = (const struct btrace_frame_cache
*) arg1
;
1630 const struct btrace_frame_cache
*cache2
1631 = (const struct btrace_frame_cache
*) arg2
;
1633 return cache1
->frame
== cache2
->frame
;
1636 /* Create a new btrace frame cache. */
1638 static struct btrace_frame_cache
*
1639 bfcache_new (struct frame_info
*frame
)
1641 struct btrace_frame_cache
*cache
;
1644 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1645 cache
->frame
= frame
;
1647 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1648 gdb_assert (*slot
== NULL
);
1654 /* Extract the branch trace function from a branch trace frame. */
1656 static const struct btrace_function
*
1657 btrace_get_frame_function (struct frame_info
*frame
)
1659 const struct btrace_frame_cache
*cache
;
1660 struct btrace_frame_cache pattern
;
1663 pattern
.frame
= frame
;
1665 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1669 cache
= (const struct btrace_frame_cache
*) *slot
;
1673 /* Implement stop_reason method for record_btrace_frame_unwind. */
1675 static enum unwind_stop_reason
1676 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1679 const struct btrace_frame_cache
*cache
;
1680 const struct btrace_function
*bfun
;
1682 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1684 gdb_assert (bfun
!= NULL
);
1687 return UNWIND_UNAVAILABLE
;
1689 return UNWIND_NO_REASON
;
1692 /* Implement this_id method for record_btrace_frame_unwind. */
1695 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1696 struct frame_id
*this_id
)
1698 const struct btrace_frame_cache
*cache
;
1699 const struct btrace_function
*bfun
;
1700 struct btrace_call_iterator it
;
1701 CORE_ADDR code
, special
;
1703 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1706 gdb_assert (bfun
!= NULL
);
1708 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1709 bfun
= btrace_call_get (&it
);
1711 code
= get_frame_func (this_frame
);
1712 special
= bfun
->number
;
1714 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1716 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1717 btrace_get_bfun_name (cache
->bfun
),
1718 core_addr_to_string_nz (this_id
->code_addr
),
1719 core_addr_to_string_nz (this_id
->special_addr
));
1722 /* Implement prev_register method for record_btrace_frame_unwind. */
1724 static struct value
*
1725 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1729 const struct btrace_frame_cache
*cache
;
1730 const struct btrace_function
*bfun
, *caller
;
1731 struct btrace_call_iterator it
;
1732 struct gdbarch
*gdbarch
;
1736 gdbarch
= get_frame_arch (this_frame
);
1737 pcreg
= gdbarch_pc_regnum (gdbarch
);
1738 if (pcreg
< 0 || regnum
!= pcreg
)
1739 throw_error (NOT_AVAILABLE_ERROR
,
1740 _("Registers are not available in btrace record history"));
1742 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1744 gdb_assert (bfun
!= NULL
);
1746 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1747 throw_error (NOT_AVAILABLE_ERROR
,
1748 _("No caller in btrace record history"));
1750 caller
= btrace_call_get (&it
);
1752 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1753 pc
= caller
->insn
.front ().pc
;
1756 pc
= caller
->insn
.back ().pc
;
1757 pc
+= gdb_insn_length (gdbarch
, pc
);
1760 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1761 btrace_get_bfun_name (bfun
), bfun
->level
,
1762 core_addr_to_string_nz (pc
));
1764 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1767 /* Implement sniffer method for record_btrace_frame_unwind. */
1770 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1771 struct frame_info
*this_frame
,
1774 const struct btrace_function
*bfun
;
1775 struct btrace_frame_cache
*cache
;
1776 struct thread_info
*tp
;
1777 struct frame_info
*next
;
1779 /* THIS_FRAME does not contain a reference to its thread. */
1780 tp
= inferior_thread ();
1783 next
= get_next_frame (this_frame
);
1786 const struct btrace_insn_iterator
*replay
;
1788 replay
= tp
->btrace
.replay
;
1790 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1794 const struct btrace_function
*callee
;
1795 struct btrace_call_iterator it
;
1797 callee
= btrace_get_frame_function (next
);
1798 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1801 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1804 bfun
= btrace_call_get (&it
);
1810 DEBUG ("[frame] sniffed frame for %s on level %d",
1811 btrace_get_bfun_name (bfun
), bfun
->level
);
1813 /* This is our frame. Initialize the frame cache. */
1814 cache
= bfcache_new (this_frame
);
1818 *this_cache
= cache
;
1822 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1825 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1826 struct frame_info
*this_frame
,
1829 const struct btrace_function
*bfun
, *callee
;
1830 struct btrace_frame_cache
*cache
;
1831 struct btrace_call_iterator it
;
1832 struct frame_info
*next
;
1833 struct thread_info
*tinfo
;
1835 next
= get_next_frame (this_frame
);
1839 callee
= btrace_get_frame_function (next
);
1843 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1846 tinfo
= inferior_thread ();
1847 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1850 bfun
= btrace_call_get (&it
);
1852 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1853 btrace_get_bfun_name (bfun
), bfun
->level
);
1855 /* This is our frame. Initialize the frame cache. */
1856 cache
= bfcache_new (this_frame
);
1860 *this_cache
= cache
;
1865 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1867 struct btrace_frame_cache
*cache
;
1870 cache
= (struct btrace_frame_cache
*) this_cache
;
1872 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1873 gdb_assert (slot
!= NULL
);
1875 htab_remove_elt (bfcache
, cache
);
1878 /* btrace recording does not store previous memory content, neither the stack
1879 frames content. Any unwinding would return erroneous results as the stack
1880 contents no longer matches the changed PC value restored from history.
1881 Therefore this unwinder reports any possibly unwound registers as
1884 const struct frame_unwind record_btrace_frame_unwind
=
1887 record_btrace_frame_unwind_stop_reason
,
1888 record_btrace_frame_this_id
,
1889 record_btrace_frame_prev_register
,
1891 record_btrace_frame_sniffer
,
1892 record_btrace_frame_dealloc_cache
1895 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1898 record_btrace_frame_unwind_stop_reason
,
1899 record_btrace_frame_this_id
,
1900 record_btrace_frame_prev_register
,
1902 record_btrace_tailcall_frame_sniffer
,
1903 record_btrace_frame_dealloc_cache
1906 /* Implement the get_unwinder method. */
1908 const struct frame_unwind
*
1909 record_btrace_target::get_unwinder ()
1911 return &record_btrace_frame_unwind
;
1914 /* Implement the get_tailcall_unwinder method. */
1916 const struct frame_unwind
*
1917 record_btrace_target::get_tailcall_unwinder ()
1919 return &record_btrace_tailcall_frame_unwind
;
1922 /* Return a human-readable string for FLAG. */
1925 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1933 return "reverse-step";
1939 return "reverse-cont";
1948 /* Indicate that TP should be resumed according to FLAG. */
1951 record_btrace_resume_thread (struct thread_info
*tp
,
1952 enum btrace_thread_flag flag
)
1954 struct btrace_thread_info
*btinfo
;
1956 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1957 target_pid_to_str (tp
->ptid
).c_str (), flag
,
1958 btrace_thread_flag_to_str (flag
));
1960 btinfo
= &tp
->btrace
;
1962 /* Fetch the latest branch trace. */
1963 btrace_fetch (tp
, record_btrace_get_cpu ());
1965 /* A resume request overwrites a preceding resume or stop request. */
1966 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1967 btinfo
->flags
|= flag
;
1970 /* Get the current frame for TP. */
1972 static struct frame_id
1973 get_thread_current_frame_id (struct thread_info
*tp
)
1978 /* Set current thread, which is implicitly used by
1979 get_current_frame. */
1980 scoped_restore_current_thread restore_thread
;
1982 switch_to_thread (tp
);
1984 process_stratum_target
*proc_target
= tp
->inf
->process_target ();
1986 /* Clear the executing flag to allow changes to the current frame.
1987 We are not actually running, yet. We just started a reverse execution
1988 command or a record goto command.
1989 For the latter, EXECUTING is false and this has no effect.
1990 For the former, EXECUTING is true and we're in wait, about to
1991 move the thread. Since we need to recompute the stack, we temporarily
1992 set EXECUTING to false. */
1993 executing
= tp
->executing
;
1994 set_executing (proc_target
, inferior_ptid
, false);
1999 id
= get_frame_id (get_current_frame ());
2001 catch (const gdb_exception
&except
)
2003 /* Restore the previous execution state. */
2004 set_executing (proc_target
, inferior_ptid
, executing
);
2009 /* Restore the previous execution state. */
2010 set_executing (proc_target
, inferior_ptid
, executing
);
2015 /* Start replaying a thread. */
2017 static struct btrace_insn_iterator
*
2018 record_btrace_start_replaying (struct thread_info
*tp
)
2020 struct btrace_insn_iterator
*replay
;
2021 struct btrace_thread_info
*btinfo
;
2023 btinfo
= &tp
->btrace
;
2026 /* We can't start replaying without trace. */
2027 if (btinfo
->functions
.empty ())
2030 /* GDB stores the current frame_id when stepping in order to detects steps
2032 Since frames are computed differently when we're replaying, we need to
2033 recompute those stored frames and fix them up so we can still detect
2034 subroutines after we started replaying. */
2037 struct frame_id frame_id
;
2038 int upd_step_frame_id
, upd_step_stack_frame_id
;
2040 /* The current frame without replaying - computed via normal unwind. */
2041 frame_id
= get_thread_current_frame_id (tp
);
2043 /* Check if we need to update any stepping-related frame id's. */
2044 upd_step_frame_id
= frame_id_eq (frame_id
,
2045 tp
->control
.step_frame_id
);
2046 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
2047 tp
->control
.step_stack_frame_id
);
2049 /* We start replaying at the end of the branch trace. This corresponds
2050 to the current instruction. */
2051 replay
= XNEW (struct btrace_insn_iterator
);
2052 btrace_insn_end (replay
, btinfo
);
2054 /* Skip gaps at the end of the trace. */
2055 while (btrace_insn_get (replay
) == NULL
)
2059 steps
= btrace_insn_prev (replay
, 1);
2061 error (_("No trace."));
2064 /* We're not replaying, yet. */
2065 gdb_assert (btinfo
->replay
== NULL
);
2066 btinfo
->replay
= replay
;
2068 /* Make sure we're not using any stale registers. */
2069 registers_changed_thread (tp
);
2071 /* The current frame with replaying - computed via btrace unwind. */
2072 frame_id
= get_thread_current_frame_id (tp
);
2074 /* Replace stepping related frames where necessary. */
2075 if (upd_step_frame_id
)
2076 tp
->control
.step_frame_id
= frame_id
;
2077 if (upd_step_stack_frame_id
)
2078 tp
->control
.step_stack_frame_id
= frame_id
;
2080 catch (const gdb_exception
&except
)
2082 xfree (btinfo
->replay
);
2083 btinfo
->replay
= NULL
;
2085 registers_changed_thread (tp
);
2093 /* Stop replaying a thread. */
2096 record_btrace_stop_replaying (struct thread_info
*tp
)
2098 struct btrace_thread_info
*btinfo
;
2100 btinfo
= &tp
->btrace
;
2102 xfree (btinfo
->replay
);
2103 btinfo
->replay
= NULL
;
2105 /* Make sure we're not leaving any stale registers. */
2106 registers_changed_thread (tp
);
2109 /* Stop replaying TP if it is at the end of its execution history. */
2112 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2114 struct btrace_insn_iterator
*replay
, end
;
2115 struct btrace_thread_info
*btinfo
;
2117 btinfo
= &tp
->btrace
;
2118 replay
= btinfo
->replay
;
2123 btrace_insn_end (&end
, btinfo
);
2125 if (btrace_insn_cmp (replay
, &end
) == 0)
2126 record_btrace_stop_replaying (tp
);
2129 /* The resume method of target record-btrace. */
2132 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2134 enum btrace_thread_flag flag
, cflag
;
2136 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
).c_str (),
2137 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2138 step
? "step" : "cont");
2140 /* Store the execution direction of the last resume.
2142 If there is more than one resume call, we have to rely on infrun
2143 to not change the execution direction in-between. */
2144 record_btrace_resume_exec_dir
= ::execution_direction
;
2146 /* As long as we're not replaying, just forward the request.
2148 For non-stop targets this means that no thread is replaying. In order to
2149 make progress, we may need to explicitly move replaying threads to the end
2150 of their execution history. */
2151 if ((::execution_direction
!= EXEC_REVERSE
)
2152 && !record_is_replaying (minus_one_ptid
))
2154 this->beneath ()->resume (ptid
, step
, signal
);
2158 /* Compute the btrace thread flag for the requested move. */
2159 if (::execution_direction
== EXEC_REVERSE
)
2161 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2166 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2170 /* We just indicate the resume intent here. The actual stepping happens in
2171 record_btrace_wait below.
2173 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2175 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2177 if (!target_is_non_stop_p ())
2179 gdb_assert (inferior_ptid
.matches (ptid
));
2181 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2183 if (tp
->ptid
.matches (inferior_ptid
))
2184 record_btrace_resume_thread (tp
, flag
);
2186 record_btrace_resume_thread (tp
, cflag
);
2191 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2192 record_btrace_resume_thread (tp
, flag
);
2195 /* Async support. */
2196 if (target_can_async_p ())
2199 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2203 /* The commit_resume method of target record-btrace. */
2206 record_btrace_target::commit_resume ()
2208 if ((::execution_direction
!= EXEC_REVERSE
)
2209 && !record_is_replaying (minus_one_ptid
))
2210 beneath ()->commit_resume ();
2213 /* Cancel resuming TP. */
2216 record_btrace_cancel_resume (struct thread_info
*tp
)
2218 enum btrace_thread_flag flags
;
2220 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2224 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2225 print_thread_id (tp
),
2226 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2227 btrace_thread_flag_to_str (flags
));
2229 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2230 record_btrace_stop_replaying_at_end (tp
);
2233 /* Return a target_waitstatus indicating that we ran out of history. */
2235 static struct target_waitstatus
2236 btrace_step_no_history (void)
2238 struct target_waitstatus status
;
2240 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2245 /* Return a target_waitstatus indicating that a step finished. */
2247 static struct target_waitstatus
2248 btrace_step_stopped (void)
2250 struct target_waitstatus status
;
2252 status
.kind
= TARGET_WAITKIND_STOPPED
;
2253 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2258 /* Return a target_waitstatus indicating that a thread was stopped as
2261 static struct target_waitstatus
2262 btrace_step_stopped_on_request (void)
2264 struct target_waitstatus status
;
2266 status
.kind
= TARGET_WAITKIND_STOPPED
;
2267 status
.value
.sig
= GDB_SIGNAL_0
;
2272 /* Return a target_waitstatus indicating a spurious stop. */
2274 static struct target_waitstatus
2275 btrace_step_spurious (void)
2277 struct target_waitstatus status
;
2279 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2284 /* Return a target_waitstatus indicating that the thread was not resumed. */
2286 static struct target_waitstatus
2287 btrace_step_no_resumed (void)
2289 struct target_waitstatus status
;
2291 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2296 /* Return a target_waitstatus indicating that we should wait again. */
2298 static struct target_waitstatus
2299 btrace_step_again (void)
2301 struct target_waitstatus status
;
2303 status
.kind
= TARGET_WAITKIND_IGNORE
;
2308 /* Clear the record histories. */
2311 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2313 xfree (btinfo
->insn_history
);
2314 xfree (btinfo
->call_history
);
2316 btinfo
->insn_history
= NULL
;
2317 btinfo
->call_history
= NULL
;
2320 /* Check whether TP's current replay position is at a breakpoint. */
2323 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2325 struct btrace_insn_iterator
*replay
;
2326 struct btrace_thread_info
*btinfo
;
2327 const struct btrace_insn
*insn
;
2329 btinfo
= &tp
->btrace
;
2330 replay
= btinfo
->replay
;
2335 insn
= btrace_insn_get (replay
);
2339 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2340 &btinfo
->stop_reason
);
2343 /* Step one instruction in forward direction. */
2345 static struct target_waitstatus
2346 record_btrace_single_step_forward (struct thread_info
*tp
)
2348 struct btrace_insn_iterator
*replay
, end
, start
;
2349 struct btrace_thread_info
*btinfo
;
2351 btinfo
= &tp
->btrace
;
2352 replay
= btinfo
->replay
;
2354 /* We're done if we're not replaying. */
2356 return btrace_step_no_history ();
2358 /* Check if we're stepping a breakpoint. */
2359 if (record_btrace_replay_at_breakpoint (tp
))
2360 return btrace_step_stopped ();
2362 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2363 jump back to the instruction at which we started. */
2369 /* We will bail out here if we continue stepping after reaching the end
2370 of the execution history. */
2371 steps
= btrace_insn_next (replay
, 1);
2375 return btrace_step_no_history ();
2378 while (btrace_insn_get (replay
) == NULL
);
2380 /* Determine the end of the instruction trace. */
2381 btrace_insn_end (&end
, btinfo
);
2383 /* The execution trace contains (and ends with) the current instruction.
2384 This instruction has not been executed, yet, so the trace really ends
2385 one instruction earlier. */
2386 if (btrace_insn_cmp (replay
, &end
) == 0)
2387 return btrace_step_no_history ();
2389 return btrace_step_spurious ();
2392 /* Step one instruction in backward direction. */
2394 static struct target_waitstatus
2395 record_btrace_single_step_backward (struct thread_info
*tp
)
2397 struct btrace_insn_iterator
*replay
, start
;
2398 struct btrace_thread_info
*btinfo
;
2400 btinfo
= &tp
->btrace
;
2401 replay
= btinfo
->replay
;
2403 /* Start replaying if we're not already doing so. */
2405 replay
= record_btrace_start_replaying (tp
);
2407 /* If we can't step any further, we reached the end of the history.
2408 Skip gaps during replay. If we end up at a gap (at the beginning of
2409 the trace), jump back to the instruction at which we started. */
2415 steps
= btrace_insn_prev (replay
, 1);
2419 return btrace_step_no_history ();
2422 while (btrace_insn_get (replay
) == NULL
);
2424 /* Check if we're stepping a breakpoint.
2426 For reverse-stepping, this check is after the step. There is logic in
2427 infrun.c that handles reverse-stepping separately. See, for example,
2428 proceed and adjust_pc_after_break.
2430 This code assumes that for reverse-stepping, PC points to the last
2431 de-executed instruction, whereas for forward-stepping PC points to the
2432 next to-be-executed instruction. */
2433 if (record_btrace_replay_at_breakpoint (tp
))
2434 return btrace_step_stopped ();
2436 return btrace_step_spurious ();
2439 /* Step a single thread. */
2441 static struct target_waitstatus
2442 record_btrace_step_thread (struct thread_info
*tp
)
2444 struct btrace_thread_info
*btinfo
;
2445 struct target_waitstatus status
;
2446 enum btrace_thread_flag flags
;
2448 btinfo
= &tp
->btrace
;
2450 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2451 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2453 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2454 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2455 btrace_thread_flag_to_str (flags
));
2457 /* We can't step without an execution history. */
2458 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2459 return btrace_step_no_history ();
2464 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2467 return btrace_step_stopped_on_request ();
2470 status
= record_btrace_single_step_forward (tp
);
2471 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2474 return btrace_step_stopped ();
2477 status
= record_btrace_single_step_backward (tp
);
2478 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2481 return btrace_step_stopped ();
2484 status
= record_btrace_single_step_forward (tp
);
2485 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2488 btinfo
->flags
|= flags
;
2489 return btrace_step_again ();
2492 status
= record_btrace_single_step_backward (tp
);
2493 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2496 btinfo
->flags
|= flags
;
2497 return btrace_step_again ();
2500 /* We keep threads moving at the end of their execution history. The wait
2501 method will stop the thread for whom the event is reported. */
2502 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2503 btinfo
->flags
|= flags
;
2508 /* Announce further events if necessary. */
2511 record_btrace_maybe_mark_async_event
2512 (const std::vector
<thread_info
*> &moving
,
2513 const std::vector
<thread_info
*> &no_history
)
2515 bool more_moving
= !moving
.empty ();
2516 bool more_no_history
= !no_history
.empty ();;
2518 if (!more_moving
&& !more_no_history
)
2522 DEBUG ("movers pending");
2524 if (more_no_history
)
2525 DEBUG ("no-history pending");
2527 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2530 /* The wait method of target record-btrace. */
2533 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2536 std::vector
<thread_info
*> moving
;
2537 std::vector
<thread_info
*> no_history
;
2539 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
).c_str (), options
);
2541 /* As long as we're not replaying, just forward the request. */
2542 if ((::execution_direction
!= EXEC_REVERSE
)
2543 && !record_is_replaying (minus_one_ptid
))
2545 return this->beneath ()->wait (ptid
, status
, options
);
2548 /* Keep a work list of moving threads. */
2549 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2550 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2551 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2552 moving
.push_back (tp
);
2554 if (moving
.empty ())
2556 *status
= btrace_step_no_resumed ();
2558 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
).c_str (),
2559 target_waitstatus_to_string (status
).c_str ());
2564 /* Step moving threads one by one, one step each, until either one thread
2565 reports an event or we run out of threads to step.
2567 When stepping more than one thread, chances are that some threads reach
2568 the end of their execution history earlier than others. If we reported
2569 this immediately, all-stop on top of non-stop would stop all threads and
2570 resume the same threads next time. And we would report the same thread
2571 having reached the end of its execution history again.
2573 In the worst case, this would starve the other threads. But even if other
2574 threads would be allowed to make progress, this would result in far too
2575 many intermediate stops.
2577 We therefore delay the reporting of "no execution history" until we have
2578 nothing else to report. By this time, all threads should have moved to
2579 either the beginning or the end of their execution history. There will
2580 be a single user-visible stop. */
2581 struct thread_info
*eventing
= NULL
;
2582 while ((eventing
== NULL
) && !moving
.empty ())
2584 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2586 thread_info
*tp
= moving
[ix
];
2588 *status
= record_btrace_step_thread (tp
);
2590 switch (status
->kind
)
2592 case TARGET_WAITKIND_IGNORE
:
2596 case TARGET_WAITKIND_NO_HISTORY
:
2597 no_history
.push_back (ordered_remove (moving
, ix
));
2601 eventing
= unordered_remove (moving
, ix
);
2607 if (eventing
== NULL
)
2609 /* We started with at least one moving thread. This thread must have
2610 either stopped or reached the end of its execution history.
2612 In the former case, EVENTING must not be NULL.
2613 In the latter case, NO_HISTORY must not be empty. */
2614 gdb_assert (!no_history
.empty ());
2616 /* We kept threads moving at the end of their execution history. Stop
2617 EVENTING now that we are going to report its stop. */
2618 eventing
= unordered_remove (no_history
, 0);
2619 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2621 *status
= btrace_step_no_history ();
2624 gdb_assert (eventing
!= NULL
);
2626 /* We kept threads replaying at the end of their execution history. Stop
2627 replaying EVENTING now that we are going to report its stop. */
2628 record_btrace_stop_replaying_at_end (eventing
);
2630 /* Stop all other threads. */
2631 if (!target_is_non_stop_p ())
2633 for (thread_info
*tp
: all_non_exited_threads ())
2634 record_btrace_cancel_resume (tp
);
2637 /* In async mode, we need to announce further events. */
2638 if (target_is_async_p ())
2639 record_btrace_maybe_mark_async_event (moving
, no_history
);
2641 /* Start record histories anew from the current position. */
2642 record_btrace_clear_histories (&eventing
->btrace
);
2644 /* We moved the replay position but did not update registers. */
2645 registers_changed_thread (eventing
);
2647 DEBUG ("wait ended by thread %s (%s): %s",
2648 print_thread_id (eventing
),
2649 target_pid_to_str (eventing
->ptid
).c_str (),
2650 target_waitstatus_to_string (status
).c_str ());
2652 return eventing
->ptid
;
2655 /* The stop method of target record-btrace. */
2658 record_btrace_target::stop (ptid_t ptid
)
2660 DEBUG ("stop %s", target_pid_to_str (ptid
).c_str ());
2662 /* As long as we're not replaying, just forward the request. */
2663 if ((::execution_direction
!= EXEC_REVERSE
)
2664 && !record_is_replaying (minus_one_ptid
))
2666 this->beneath ()->stop (ptid
);
2670 process_stratum_target
*proc_target
2671 = current_inferior ()->process_target ();
2673 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2675 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2676 tp
->btrace
.flags
|= BTHR_STOP
;
2681 /* The can_execute_reverse method of target record-btrace. */
2684 record_btrace_target::can_execute_reverse ()
2689 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2692 record_btrace_target::stopped_by_sw_breakpoint ()
2694 if (record_is_replaying (minus_one_ptid
))
2696 struct thread_info
*tp
= inferior_thread ();
2698 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2701 return this->beneath ()->stopped_by_sw_breakpoint ();
2704 /* The supports_stopped_by_sw_breakpoint method of target
bool
record_btrace_target::supports_stopped_by_sw_breakpoint ()
{
  /* While replaying we answer breakpoint queries ourselves (see
     stopped_by_sw_breakpoint above), so the feature is supported.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_sw_breakpoint ();
}
/* The stopped_by_hw_breakpoint method of target record-btrace.  */
bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  /* When replaying, the stop reason is recorded in the thread's btrace
     info rather than known to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* Not replaying: forward to the target beneath.  */
  return this->beneath ()->stopped_by_hw_breakpoint ();
}
2731 /* The supports_stopped_by_hw_breakpoint method of target
bool
record_btrace_target::supports_stopped_by_hw_breakpoint ()
{
  /* While replaying we answer breakpoint queries ourselves (see
     stopped_by_hw_breakpoint above), so the feature is supported.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_hw_breakpoint ();
}
2743 /* The update_thread_list method of target record-btrace. */
void
record_btrace_target::update_thread_list ()
{
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))
    return;

  /* Forward the request.  */
  this->beneath ()->update_thread_list ();
}
2756 /* The thread_alive method of target record-btrace. */
bool
record_btrace_target::thread_alive (ptid_t ptid)
{
  /* We don't add or remove threads during replay, so every known
     thread is considered alive.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  /* Forward the request.  */
  return this->beneath ()->thread_alive (ptid);
}
2769 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Begin replaying if we are not already; if the requested
	 position equals the current one, there is nothing to do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* Moving the replay position invalidates the cached register
	 contents for this thread.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  /* Refresh the cached stop PC and show where we ended up.  */
  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2801 /* The goto_record_begin method of target record-btrace. */
void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      /* Advance past the gap; if we cannot advance at all, the trace
	 consists of gaps only.  */
      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2826 /* The goto_record_end method of target record-btrace. */
void
record_btrace_target::goto_record_end ()
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator means "stop replaying", i.e. return to the end of
     the recorded history (the current live position).  */
  record_btrace_set_replay (tp, NULL);
}
2838 /* The goto_record method of target record-btrace. */
void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2865 /* The record_stop_replaying method of target record-btrace. */
void
record_btrace_target::record_stop_replaying ()
{
  /* Stop replaying on every live thread.  */
  for (thread_info *tp : all_non_exited_threads ())
    record_btrace_stop_replaying (tp);
}
2874 /* The execution_direction target method. */
enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  /* Report the direction of the most recent resume request.  */
  return record_btrace_resume_exec_dir;
}
2882 /* The prepare_to_generate_core target method. */
void
record_btrace_target::prepare_to_generate_core ()
{
  /* While generating a core file, allow memory accesses that would
     otherwise be refused during replay.  */
  record_btrace_generating_corefile = 1;
}
2890 /* The done_generating_core target method. */
void
record_btrace_target::done_generating_core ()
{
  /* Core file generation finished; restore normal replay memory-access
     restrictions.  */
  record_btrace_generating_corefile = 0;
}
2898 /* Start recording in BTS format. */
static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format on failure so a later plain
	 "record btrace" is not stuck on BTS, then re-raise.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2919 /* Start recording in Intel Processor Trace format. */
static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format on failure so a later plain
	 "record btrace" is not stuck on PT, then re-raise.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2940 /* Alias for "target record". */
static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer Intel Processor Trace; fall back to BTS if PT is not
     available on this system.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Neither format worked; reset and re-raise.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
2970 /* The "show record btrace replay-memory-access" command. */
2973 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2974 struct cmd_list_element
*c
, const char *value
)
2976 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2977 replay_memory_access
);
2980 /* The "set record btrace cpu none" command. */
2983 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2985 if (args
!= nullptr && *args
!= 0)
2986 error (_("Trailing junk: '%s'."), args
);
2988 record_btrace_cpu_state
= CS_NONE
;
2991 /* The "set record btrace cpu auto" command. */
2994 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
2996 if (args
!= nullptr && *args
!= 0)
2997 error (_("Trailing junk: '%s'."), args
);
2999 record_btrace_cpu_state
= CS_AUTO
;
3002 /* The "set record btrace cpu" command. */
static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      /* FAMILY/MODEL/STEPPING all given; L2 is the end of the match.  */
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      /* Only FAMILY/MODEL given; STEPPING defaults to 0.  */
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  /* Range-check against the widths of the btrace_cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
3046 /* The "show record btrace cpu" command. */
static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* Omit a zero stepping, matching the "set" syntax where
	     STEPPING is optional.  */
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Reached only for an unknown state or vendor.  */
  error (_("Internal error: bad cpu state."));
}
3084 /* The "record bts buffer-size" show value function. */
static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the setting already rendered as a string by the
     set/show machinery.  */
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3095 /* The "record pt buffer-size" show value function. */
static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the setting already rendered as a string by the
     set/show machinery.  */
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3106 /* Initialize btrace commands. */
void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* "record btrace" (alias "record b") and its format sub-commands.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_basic_prefix_cmd ("btrace", class_support,
			_("Set record options."), &set_record_btrace_cmdlist,
			"set record btrace ", 0, &set_record_cmdlist);

  add_show_prefix_cmd ("btrace", class_support,
		       _("Show record options."), &show_record_btrace_cmdlist,
		       "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set record btrace cpu" prefix and its "auto"/"none" sub-commands.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  "set record btrace cpu ", 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" and the BTS buffer-size option.  */
  add_basic_prefix_cmd ("bts", class_support,
			_("Set record btrace bts options."),
			&set_record_btrace_bts_cmdlist,
			"set record btrace bts ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("bts", class_support,
		       _("Show record btrace bts options."),
		       &show_record_btrace_bts_cmdlist,
		       "show record btrace bts ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" and the PT buffer-size option.  */
  add_basic_prefix_cmd ("pt", class_support,
			_("Set record btrace pt options."),
			&set_record_btrace_pt_cmdlist,
			"set record btrace pt ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("pt", class_support,
		       _("Show record btrace pt options."),
		       &show_record_btrace_pt_cmdlist,
		       "show record btrace pt ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache used when unwinding replayed frames; presumably maps frames to
     btrace function segments -- see the bfcache helpers elsewhere in this
     file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes requested from the kernel.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}