Add target_ops argument to to_call_history_from
[binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops;
41
42 /* A new-thread observer that enables branch tracing for each new thread. */
43 static struct observer *record_btrace_thread_observer;
44
45 /* Temporarily allow memory accesses. */
46 static int record_btrace_allow_memory_access;
47
48 /* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
50
51 #define DEBUG(msg, args...) \
52 do \
53 { \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
57 } \
58 while (0)
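
/* For example (illustrative), after "set debug record 1", DEBUG ("open")
   prints "[record-btrace] open" to gdb_stdlog.  The GNU ##args extension
   swallows the comma when no variadic arguments are given.  */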
59
60
61 /* Update the branch trace for the current thread and return a pointer to its
62 thread_info.
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
67 static struct thread_info *
68 require_btrace_thread (void)
69 {
70 struct thread_info *tp;
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
80 if (btrace_is_empty (tp))
81 error (_("No trace."));
82
83 return tp;
84 }
85
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92 static struct btrace_thread_info *
93 require_btrace (void)
94 {
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
100 }
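
/* Callers below follow a common pattern (sketch):

     struct btrace_thread_info *btinfo = require_btrace ();

   and simply let the error propagate if there is no thread or no trace.  */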
101
102 /* Enable branch tracing for one thread. Warn on errors. */
103
104 static void
105 record_btrace_enable_warn (struct thread_info *tp)
106 {
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114 }
115
116 /* Callback function to disable branch tracing for one thread. */
117
118 static void
119 record_btrace_disable_callback (void *arg)
120 {
121 struct thread_info *tp;
122
123 tp = arg;
124
125 btrace_disable (tp);
126 }
127
128 /* Enable automatic tracing of new threads. */
129
130 static void
131 record_btrace_auto_enable (void)
132 {
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137 }
138
139 /* Disable automatic tracing of new threads. */
140
141 static void
142 record_btrace_auto_disable (void)
143 {
144 /* The observer may have been detached already. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152 }
153
154 /* The to_open method of target record-btrace. */
155
156 static void
157 record_btrace_open (char *args, int from_tty)
158 {
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
164 record_preopen ();
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193 }
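
/* Example (illustrative): "target record-btrace" with no arguments enables
   tracing for all threads, while an argument such as "1-2" would restrict
   it to threads 1 and 2 via the number_is_in_list check above.  */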
194
195 /* The to_stop_recording method of target record-btrace. */
196
197 static void
198 record_btrace_stop_recording (struct target_ops *self)
199 {
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209 }
210
211 /* The to_close method of target record-btrace. */
212
213 static void
214 record_btrace_close (struct target_ops *self)
215 {
216 struct thread_info *tp;
217
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
226 }
227
228 /* The to_info_record method of target record-btrace. */
229
230 static void
231 record_btrace_info (struct target_ops *self)
232 {
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
235 unsigned int insns, calls;
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
245 insns = 0;
246 calls = 0;
247
248 btinfo = &tp->btrace;
249
250 if (!btrace_is_empty (tp))
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
257 calls = btrace_call_number (&call);
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
261 insns = btrace_insn_number (&insn);
262 }
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
265 "%d (%s).\n"), insns, calls, tp->num,
266 target_pid_to_str (tp->ptid));
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
271 }
272
273 /* Print an unsigned int. */
274
275 static void
276 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
277 {
278 ui_out_field_fmt (uiout, fld, "%u", val);
279 }
280
281 /* Disassemble a section of the recorded instruction trace. */
282
283 static void
284 btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
287 {
288 struct gdbarch *gdbarch;
289 struct btrace_insn_iterator it;
290
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
293
294 gdbarch = target_gdbarch ();
295
296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
297 {
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
302 /* Print the instruction index. */
303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
309 }
310 }
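
/* One iteration of the loop above might print, e.g. (illustrative):

     42      0x0000000000400540 <main+16>:    callq  0x400430 <foo>

   where "42" is the trace-wide instruction index and the rest comes from
   gdb_disassembly.  */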
311
312 /* The to_insn_history method of target record-btrace. */
313
314 static void
315 record_btrace_insn_history (struct target_ops *self, int size, int flags)
316 {
317 struct btrace_thread_info *btinfo;
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
322 unsigned int context, covered;
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
327 context = abs (size);
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
334 {
335 struct btrace_insn_iterator *replay;
336
337 DEBUG ("insn-history (0x%x): %d", flags, size);
338
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
363 }
364 else
365 {
366 begin = history->begin;
367 end = history->end;
368
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
371
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
382 }
383
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
393
394 btrace_set_insn_history (btinfo, &begin, &end);
395 do_cleanups (uiout_cleanup);
396 }
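
/* Example (illustrative): a first "record instruction-history" prints the
   most recent instructions, or those around the replay position; repeating
   the command continues forward from the saved history range, while
   "record instruction-history -" moves backwards instead.  */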
397
398 /* The to_insn_history_range method of target record-btrace. */
399
400 static void
401 record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
403 {
404 struct btrace_thread_info *btinfo;
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
409 unsigned int low, high;
410 int found;
411
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
414 "insn history");
415 low = from;
416 high = to;
417
418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
419
420 /* Check for wrap-arounds. */
421 if (low != from || high != to)
422 error (_("Bad range."));
423
424 if (high < low)
425 error (_("Bad range."));
426
427 btinfo = require_btrace ();
428
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 if (found == 0)
431 error (_("Range out of bounds."));
432
433 found = btrace_find_insn_by_number (&end, btinfo, high);
434 if (found == 0)
435 {
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
438 }
439 else
440 {
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
443 }
444
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
447
448 do_cleanups (uiout_cleanup);
449 }
450
451 /* The to_insn_history_from method of target record-btrace. */
452
453 static void
454 record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
456 {
457 ULONGEST begin, end, context;
458
459 context = abs (size);
460 if (context == 0)
461 error (_("Bad record instruction-history-size."));
462
463 if (size < 0)
464 {
465 end = from;
466
467 if (from < context)
468 begin = 0;
469 else
470 begin = from - context + 1;
471 }
472 else
473 {
474 begin = from;
475 end = from + context - 1;
476
477 /* Check for wrap-around. */
478 if (end < begin)
479 end = ULONGEST_MAX;
480 }
481
482 record_btrace_insn_history_range (self, begin, end, flags);
483 }
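
/* Worked example for the arithmetic above (hypothetical numbers): with
   FROM = 100 and SIZE = -10, CONTEXT is 10, END = 100 and
   BEGIN = 100 - 10 + 1 = 91, i.e. instructions 91..100.  With SIZE = 10,
   the range is 100..109 instead.  */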
484
485 /* Print the instruction number range for a function call history line. */
486
487 static void
488 btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
490 {
491 unsigned int begin, end, size;
492
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
495
496 begin = bfun->insn_offset;
497 end = begin + size - 1;
498
499 ui_out_field_uint (uiout, "insn begin", begin);
500 ui_out_text (uiout, ",");
501 ui_out_field_uint (uiout, "insn end", end);
502 }
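
/* E.g. (illustrative) a function segment with insn_offset 10 containing
   five instructions prints "10,14".  */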
503
504 /* Print the source line information for a function call history line. */
505
506 static void
507 btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
509 {
510 struct symbol *sym;
511 int begin, end;
512
513 sym = bfun->sym;
514 if (sym == NULL)
515 return;
516
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
519
520 begin = bfun->lbegin;
521 end = bfun->lend;
522
523 if (end < begin)
524 return;
525
526 ui_out_text (uiout, ":");
527 ui_out_field_int (uiout, "min line", begin);
528
529 if (end == begin)
530 return;
531
532 ui_out_text (uiout, ",");
533 ui_out_field_int (uiout, "max line", end);
534 }
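
/* E.g. (illustrative) a segment spanning lines 20 to 24 of foo.c prints
   "foo.c:20,24"; a single-line segment prints just "foo.c:20".  */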
535
536 /* Get the name of a branch trace function. */
537
538 static const char *
539 btrace_get_bfun_name (const struct btrace_function *bfun)
540 {
541 struct minimal_symbol *msym;
542 struct symbol *sym;
543
544 if (bfun == NULL)
545 return "??";
546
547 msym = bfun->msym;
548 sym = bfun->sym;
549
550 if (sym != NULL)
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
554 else
555 return "??";
556 }
557
558 /* Print a section of the recorded function trace. */
559
560 static void
561 btrace_call_history (struct ui_out *uiout,
562 const struct btrace_thread_info *btinfo,
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
565 enum record_print_flag flags)
566 {
567 struct btrace_call_iterator it;
568
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
571
572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
573 {
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
576 struct symbol *sym;
577
578 bfun = btrace_call_get (&it);
579 sym = bfun->sym;
580 msym = bfun->msym;
581
582 /* Print the function index. */
583 ui_out_field_uint (uiout, "index", bfun->number);
584 ui_out_text (uiout, "\t");
585
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
587 {
588 int level = bfun->level + btinfo->level, i;
589
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, " ");
592 }
593
594 if (sym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
600
601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
602 {
603 ui_out_text (uiout, _("\tinst "));
604 btrace_call_history_insn_range (uiout, bfun);
605 }
606
607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
608 {
609 ui_out_text (uiout, _("\tat "));
610 btrace_call_history_src_line (uiout, bfun);
611 }
612
613 ui_out_text (uiout, "\n");
614 }
615 }
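
/* A full line of "record function-call-history /ilc" output might look
   like (illustrative):

     13          foo     inst 42,47      at foo.c:20,24

   with the whitespace before "foo" reflecting the call depth.  */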
616
617 /* The to_call_history method of target record-btrace. */
618
619 static void
620 record_btrace_call_history (struct target_ops *self, int size, int flags)
621 {
622 struct btrace_thread_info *btinfo;
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
627 unsigned int context, covered;
628
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
631 "insn history");
632 context = abs (size);
633 if (context == 0)
634 error (_("Bad record function-call-history-size."));
635
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
638 if (history == NULL)
639 {
640 struct btrace_insn_iterator *replay;
641
642 DEBUG ("call-history (0x%x): %d", flags, size);
643
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
647 if (replay != NULL)
648 {
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
651 }
652 else
653 btrace_call_end (&begin, btinfo);
654
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
657 context. */
658 end = begin;
659 if (size < 0)
660 {
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
665 }
666 else
667 {
668 covered = btrace_call_next (&end, context);
669 covered += btrace_call_prev (&begin, context - covered);
670 }
671 }
672 else
673 {
674 begin = history->begin;
675 end = history->end;
676
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
679
680 if (size < 0)
681 {
682 end = begin;
683 covered = btrace_call_prev (&begin, context);
684 }
685 else
686 {
687 begin = end;
688 covered = btrace_call_next (&end, context);
689 }
690 }
691
692 if (covered > 0)
693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
694 else
695 {
696 if (size < 0)
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 else
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 }
701
702 btrace_set_call_history (btinfo, &begin, &end);
703 do_cleanups (uiout_cleanup);
704 }
705
706 /* The to_call_history_range method of target record-btrace. */
707
708 static void
709 record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
710 {
711 struct btrace_thread_info *btinfo;
712 struct btrace_call_history *history;
713 struct btrace_call_iterator begin, end;
714 struct cleanup *uiout_cleanup;
715 struct ui_out *uiout;
716 unsigned int low, high;
717 int found;
718
719 uiout = current_uiout;
720 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
721 "func history");
722 low = from;
723 high = to;
724
725 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
726
727 /* Check for wrap-arounds. */
728 if (low != from || high != to)
729 error (_("Bad range."));
730
731 if (high < low)
732 error (_("Bad range."));
733
734 btinfo = require_btrace ();
735
736 found = btrace_find_call_by_number (&begin, btinfo, low);
737 if (found == 0)
738 error (_("Range out of bounds."));
739
740 found = btrace_find_call_by_number (&end, btinfo, high);
741 if (found == 0)
742 {
743 /* Silently truncate the range. */
744 btrace_call_end (&end, btinfo);
745 }
746 else
747 {
748 /* We want both begin and end to be inclusive. */
749 btrace_call_next (&end, 1);
750 }
751
752 btrace_call_history (uiout, btinfo, &begin, &end, flags);
753 btrace_set_call_history (btinfo, &begin, &end);
754
755 do_cleanups (uiout_cleanup);
756 }
757
758 /* The to_call_history_from method of target record-btrace. */
759
760 static void
761 record_btrace_call_history_from (struct target_ops *self,
762 ULONGEST from, int size, int flags)
763 {
764 ULONGEST begin, end, context;
765
766 context = abs (size);
767 if (context == 0)
768 error (_("Bad record function-call-history-size."));
769
770 if (size < 0)
771 {
772 end = from;
773
774 if (from < context)
775 begin = 0;
776 else
777 begin = from - context + 1;
778 }
779 else
780 {
781 begin = from;
782 end = from + context - 1;
783
784 /* Check for wrap-around. */
785 if (end < begin)
786 end = ULONGEST_MAX;
787 }
788
789 record_btrace_call_history_range (begin, end, flags);
790 }
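
/* The clipping mirrors record_btrace_insn_history_from above: e.g.
   (hypothetical) FROM = 5 and SIZE = -10 yields BEGIN = 0 and END = 5,
   since FROM < CONTEXT.  */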
791
792 /* The to_record_is_replaying method of target record-btrace. */
793
794 static int
795 record_btrace_is_replaying (struct target_ops *self)
796 {
797 struct thread_info *tp;
798
799 ALL_THREADS (tp)
800 if (btrace_is_replaying (tp))
801 return 1;
802
803 return 0;
804 }
805
806 /* The to_xfer_partial method of target record-btrace. */
807
808 static enum target_xfer_status
809 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
810 const char *annex, gdb_byte *readbuf,
811 const gdb_byte *writebuf, ULONGEST offset,
812 ULONGEST len, ULONGEST *xfered_len)
813 {
814 struct target_ops *t;
815
816 /* Filter out requests that don't make sense during replay. */
817 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
818 {
819 switch (object)
820 {
821 case TARGET_OBJECT_MEMORY:
822 {
823 struct target_section *section;
824
825 /* We do not allow writing memory in general. */
826 if (writebuf != NULL)
827 {
828 *xfered_len = len;
829 return TARGET_XFER_E_UNAVAILABLE;
830 }
831
832 /* We allow reading readonly memory. */
833 section = target_section_by_addr (ops, offset);
834 if (section != NULL)
835 {
836 /* Check if the section we found is readonly. */
837 if ((bfd_get_section_flags (section->the_bfd_section->owner,
838 section->the_bfd_section)
839 & SEC_READONLY) != 0)
840 {
841 /* Truncate the request to fit into this section. */
842 len = min (len, section->endaddr - offset);
843 break;
844 }
845 }
846
847 *xfered_len = len;
848 return TARGET_XFER_E_UNAVAILABLE;
849 }
850 }
851 }
852
853 /* Forward the request. */
854 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
855 if (ops->to_xfer_partial != NULL)
856 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
857 offset, len, xfered_len);
858
859 *xfered_len = len;
860 return TARGET_XFER_E_UNAVAILABLE;
861 }
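
/* E.g. (illustrative) while replaying, reading an instruction from a
   read-only code section is forwarded to the target beneath, whereas
   reading the stack or writing any memory fails with
   TARGET_XFER_E_UNAVAILABLE, because branch trace records control flow
   only, not memory contents.  */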
862
863 /* The to_insert_breakpoint method of target record-btrace. */
864
865 static int
866 record_btrace_insert_breakpoint (struct target_ops *ops,
867 struct gdbarch *gdbarch,
868 struct bp_target_info *bp_tgt)
869 {
870 volatile struct gdb_exception except;
871 int old, ret;
872
873 /* Inserting breakpoints requires accessing memory. Allow it for the
874 duration of this function. */
875 old = record_btrace_allow_memory_access;
876 record_btrace_allow_memory_access = 1;
877
878 ret = 0;
879 TRY_CATCH (except, RETURN_MASK_ALL)
880 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
881
882 record_btrace_allow_memory_access = old;
883
884 if (except.reason < 0)
885 throw_exception (except);
886
887 return ret;
888 }
889
890 /* The to_remove_breakpoint method of target record-btrace. */
891
892 static int
893 record_btrace_remove_breakpoint (struct target_ops *ops,
894 struct gdbarch *gdbarch,
895 struct bp_target_info *bp_tgt)
896 {
897 volatile struct gdb_exception except;
898 int old, ret;
899
900 /* Removing breakpoints requires accessing memory. Allow it for the
901 duration of this function. */
902 old = record_btrace_allow_memory_access;
903 record_btrace_allow_memory_access = 1;
904
905 ret = 0;
906 TRY_CATCH (except, RETURN_MASK_ALL)
907 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
908
909 record_btrace_allow_memory_access = old;
910
911 if (except.reason < 0)
912 throw_exception (except);
913
914 return ret;
915 }
916
917 /* The to_fetch_registers method of target record-btrace. */
918
919 static void
920 record_btrace_fetch_registers (struct target_ops *ops,
921 struct regcache *regcache, int regno)
922 {
923 struct btrace_insn_iterator *replay;
924 struct thread_info *tp;
925
926 tp = find_thread_ptid (inferior_ptid);
927 gdb_assert (tp != NULL);
928
929 replay = tp->btrace.replay;
930 if (replay != NULL)
931 {
932 const struct btrace_insn *insn;
933 struct gdbarch *gdbarch;
934 int pcreg;
935
936 gdbarch = get_regcache_arch (regcache);
937 pcreg = gdbarch_pc_regnum (gdbarch);
938 if (pcreg < 0)
939 return;
940
941 /* We can only provide the PC register. */
942 if (regno >= 0 && regno != pcreg)
943 return;
944
945 insn = btrace_insn_get (replay);
946 gdb_assert (insn != NULL);
947
948 regcache_raw_supply (regcache, pcreg, &insn->pc);
949 }
950 else
951 {
952 struct target_ops *t;
953
954 for (t = ops->beneath; t != NULL; t = t->beneath)
955 if (t->to_fetch_registers != NULL)
956 {
957 t->to_fetch_registers (t, regcache, regno);
958 break;
959 }
960 }
961 }
962
963 /* The to_store_registers method of target record-btrace. */
964
965 static void
966 record_btrace_store_registers (struct target_ops *ops,
967 struct regcache *regcache, int regno)
968 {
969 struct target_ops *t;
970
971 if (record_btrace_is_replaying (ops))
972 error (_("This record target does not allow writing registers."));
973
974 gdb_assert (may_write_registers != 0);
975
976 for (t = ops->beneath; t != NULL; t = t->beneath)
977 if (t->to_store_registers != NULL)
978 {
979 t->to_store_registers (t, regcache, regno);
980 return;
981 }
982
983 noprocess ();
984 }
985
986 /* The to_prepare_to_store method of target record-btrace. */
987
988 static void
989 record_btrace_prepare_to_store (struct target_ops *ops,
990 struct regcache *regcache)
991 {
992 struct target_ops *t;
993
994 if (record_btrace_is_replaying (ops))
995 return;
996
997 for (t = ops->beneath; t != NULL; t = t->beneath)
998 if (t->to_prepare_to_store != NULL)
999 {
1000 t->to_prepare_to_store (t, regcache);
1001 return;
1002 }
1003 }
1004
1005 /* The branch trace frame cache. */
1006
1007 struct btrace_frame_cache
1008 {
1009 /* The thread. */
1010 struct thread_info *tp;
1011
1012 /* The frame info. */
1013 struct frame_info *frame;
1014
1015 /* The branch trace function segment. */
1016 const struct btrace_function *bfun;
1017 };
1018
1019 /* A struct btrace_frame_cache hash table indexed by FRAME. */
1020
1021 static htab_t bfcache;
1022
1023 /* hash_f for htab_create_alloc of bfcache. */
1024
1025 static hashval_t
1026 bfcache_hash (const void *arg)
1027 {
1028 const struct btrace_frame_cache *cache = arg;
1029
1030 return htab_hash_pointer (cache->frame);
1031 }
1032
1033 /* eq_f for htab_create_alloc of bfcache. */
1034
1035 static int
1036 bfcache_eq (const void *arg1, const void *arg2)
1037 {
1038 const struct btrace_frame_cache *cache1 = arg1;
1039 const struct btrace_frame_cache *cache2 = arg2;
1040
1041 return cache1->frame == cache2->frame;
1042 }
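
/* Together, bfcache_hash and bfcache_eq make BFCACHE act as a map from a
   frame_info pointer to its btrace_frame_cache entry.  */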
1043
1044 /* Create a new btrace frame cache. */
1045
1046 static struct btrace_frame_cache *
1047 bfcache_new (struct frame_info *frame)
1048 {
1049 struct btrace_frame_cache *cache;
1050 void **slot;
1051
1052 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1053 cache->frame = frame;
1054
1055 slot = htab_find_slot (bfcache, cache, INSERT);
1056 gdb_assert (*slot == NULL);
1057 *slot = cache;
1058
1059 return cache;
1060 }
1061
1062 /* Extract the branch trace function from a branch trace frame. */
1063
1064 static const struct btrace_function *
1065 btrace_get_frame_function (struct frame_info *frame)
1066 {
1067 const struct btrace_frame_cache *cache;
1068 const struct btrace_function *bfun;
1069 struct btrace_frame_cache pattern;
1070 void **slot;
1071
1072 pattern.frame = frame;
1073
1074 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1075 if (slot == NULL)
1076 return NULL;
1077
1078 cache = *slot;
1079 return cache->bfun;
1080 }
1081
1082 /* Implement stop_reason method for record_btrace_frame_unwind. */
1083
1084 static enum unwind_stop_reason
1085 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1086 void **this_cache)
1087 {
1088 const struct btrace_frame_cache *cache;
1089 const struct btrace_function *bfun;
1090
1091 cache = *this_cache;
1092 bfun = cache->bfun;
1093 gdb_assert (bfun != NULL);
1094
1095 if (bfun->up == NULL)
1096 return UNWIND_UNAVAILABLE;
1097
1098 return UNWIND_NO_REASON;
1099 }
1100
1101 /* Implement this_id method for record_btrace_frame_unwind. */
1102
1103 static void
1104 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1105 struct frame_id *this_id)
1106 {
1107 const struct btrace_frame_cache *cache;
1108 const struct btrace_function *bfun;
1109 CORE_ADDR code, special;
1110
1111 cache = *this_cache;
1112
1113 bfun = cache->bfun;
1114 gdb_assert (bfun != NULL);
1115
1116 while (bfun->segment.prev != NULL)
1117 bfun = bfun->segment.prev;
1118
1119 code = get_frame_func (this_frame);
1120 special = bfun->number;
1121
1122 *this_id = frame_id_build_unavailable_stack_special (code, special);
1123
1124 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1125 btrace_get_bfun_name (cache->bfun),
1126 core_addr_to_string_nz (this_id->code_addr),
1127 core_addr_to_string_nz (this_id->special_addr));
1128 }
1129
1130 /* Implement prev_register method for record_btrace_frame_unwind. */
1131
1132 static struct value *
1133 record_btrace_frame_prev_register (struct frame_info *this_frame,
1134 void **this_cache,
1135 int regnum)
1136 {
1137 const struct btrace_frame_cache *cache;
1138 const struct btrace_function *bfun, *caller;
1139 const struct btrace_insn *insn;
1140 struct gdbarch *gdbarch;
1141 CORE_ADDR pc;
1142 int pcreg;
1143
1144 gdbarch = get_frame_arch (this_frame);
1145 pcreg = gdbarch_pc_regnum (gdbarch);
1146 if (pcreg < 0 || regnum != pcreg)
1147 throw_error (NOT_AVAILABLE_ERROR,
1148 _("Registers are not available in btrace record history"));
1149
1150 cache = *this_cache;
1151 bfun = cache->bfun;
1152 gdb_assert (bfun != NULL);
1153
1154 caller = bfun->up;
1155 if (caller == NULL)
1156 throw_error (NOT_AVAILABLE_ERROR,
1157 _("No caller in btrace record history"));
1158
1159 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1160 {
1161 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1162 pc = insn->pc;
1163 }
1164 else
1165 {
1166 insn = VEC_last (btrace_insn_s, caller->insn);
1167 pc = insn->pc;
1168
1169 pc += gdb_insn_length (gdbarch, pc);
1170 }
1171
1172 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1173 btrace_get_bfun_name (bfun), bfun->level,
1174 core_addr_to_string_nz (pc));
1175
1176 return frame_unwind_got_address (this_frame, regnum, pc);
1177 }
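
/* Note on the two cases above: if the up link was created by a return
   (BFUN_UP_LINKS_TO_RET), the caller segment starts at the return address;
   otherwise the caller's last recorded instruction is the call itself, so
   the return address is the instruction following it.  */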
1178
1179 /* Implement sniffer method for record_btrace_frame_unwind. */
1180
1181 static int
1182 record_btrace_frame_sniffer (const struct frame_unwind *self,
1183 struct frame_info *this_frame,
1184 void **this_cache)
1185 {
1186 const struct btrace_function *bfun;
1187 struct btrace_frame_cache *cache;
1188 struct thread_info *tp;
1189 struct frame_info *next;
1190
1191 /* THIS_FRAME does not contain a reference to its thread. */
1192 tp = find_thread_ptid (inferior_ptid);
1193 gdb_assert (tp != NULL);
1194
1195 bfun = NULL;
1196 next = get_next_frame (this_frame);
1197 if (next == NULL)
1198 {
1199 const struct btrace_insn_iterator *replay;
1200
1201 replay = tp->btrace.replay;
1202 if (replay != NULL)
1203 bfun = replay->function;
1204 }
1205 else
1206 {
1207 const struct btrace_function *callee;
1208
1209 callee = btrace_get_frame_function (next);
1210 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1211 bfun = callee->up;
1212 }
1213
1214 if (bfun == NULL)
1215 return 0;
1216
1217 DEBUG ("[frame] sniffed frame for %s on level %d",
1218 btrace_get_bfun_name (bfun), bfun->level);
1219
1220 /* This is our frame. Initialize the frame cache. */
1221 cache = bfcache_new (this_frame);
1222 cache->tp = tp;
1223 cache->bfun = bfun;
1224
1225 *this_cache = cache;
1226 return 1;
1227 }
1228
1229 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1230
1231 static int
1232 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1233 struct frame_info *this_frame,
1234 void **this_cache)
1235 {
1236 const struct btrace_function *bfun, *callee;
1237 struct btrace_frame_cache *cache;
1238 struct frame_info *next;
1239
1240 next = get_next_frame (this_frame);
1241 if (next == NULL)
1242 return 0;
1243
1244 callee = btrace_get_frame_function (next);
1245 if (callee == NULL)
1246 return 0;
1247
1248 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1249 return 0;
1250
1251 bfun = callee->up;
1252 if (bfun == NULL)
1253 return 0;
1254
1255 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1256 btrace_get_bfun_name (bfun), bfun->level);
1257
1258 /* This is our frame. Initialize the frame cache. */
1259 cache = bfcache_new (this_frame);
1260 cache->tp = find_thread_ptid (inferior_ptid);
1261 cache->bfun = bfun;
1262
1263 *this_cache = cache;
1264 return 1;
1265 }
1266
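/* Implement the dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  */
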
1267 static void
1268 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1269 {
1270 struct btrace_frame_cache *cache;
1271 void **slot;
1272
1273 cache = this_cache;
1274
1275 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1276 gdb_assert (slot != NULL);
1277
1278 htab_remove_elt (bfcache, cache);
1279 }
1280
1281 /* btrace recording does not store previous memory content, nor the stack
1282 frames' content. Any unwinding would return erroneous results, as the
1283 stack contents no longer match the changed PC value restored from history.
1284 Therefore this unwinder reports any possibly unwound registers as
1285 <unavailable>. */
1286
1287 const struct frame_unwind record_btrace_frame_unwind =
1288 {
1289 NORMAL_FRAME,
1290 record_btrace_frame_unwind_stop_reason,
1291 record_btrace_frame_this_id,
1292 record_btrace_frame_prev_register,
1293 NULL,
1294 record_btrace_frame_sniffer,
1295 record_btrace_frame_dealloc_cache
1296 };
1297
1298 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1299 {
1300 TAILCALL_FRAME,
1301 record_btrace_frame_unwind_stop_reason,
1302 record_btrace_frame_this_id,
1303 record_btrace_frame_prev_register,
1304 NULL,
1305 record_btrace_tailcall_frame_sniffer,
1306 record_btrace_frame_dealloc_cache
1307 };
1308
1309 /* Indicate that TP should be resumed according to FLAG. */
1310
1311 static void
1312 record_btrace_resume_thread (struct thread_info *tp,
1313 enum btrace_thread_flag flag)
1314 {
1315 struct btrace_thread_info *btinfo;
1316
1317 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1318
1319 btinfo = &tp->btrace;
1320
1321 if ((btinfo->flags & BTHR_MOVE) != 0)
1322 error (_("Thread already moving."));
1323
1324 /* Fetch the latest branch trace. */
1325 btrace_fetch (tp);
1326
1327 btinfo->flags |= flag;
1328 }
1329
1330 /* Find the thread to resume given a PTID. */
1331
1332 static struct thread_info *
1333 record_btrace_find_resume_thread (ptid_t ptid)
1334 {
1335 struct thread_info *tp;
1336
1337 /* When asked to resume everything, we pick the current thread. */
1338 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1339 ptid = inferior_ptid;
1340
1341 return find_thread_ptid (ptid);
1342 }
1343
1344 /* Start replaying a thread. */
1345
1346 static struct btrace_insn_iterator *
1347 record_btrace_start_replaying (struct thread_info *tp)
1348 {
1349 volatile struct gdb_exception except;
1350 struct btrace_insn_iterator *replay;
1351 struct btrace_thread_info *btinfo;
1352 int executing;
1353
1354 btinfo = &tp->btrace;
1355 replay = NULL;
1356
1357 /* We can't start replaying without trace. */
1358 if (btinfo->begin == NULL)
1359 return NULL;
1360
1361 /* Clear the executing flag to allow changes to the current frame.
1362 We are not actually running, yet. We just started a reverse execution
1363 command or a record goto command.
1364 For the latter, EXECUTING is false and this has no effect.
1365 For the former, EXECUTING is true and we're in to_wait, about to
1366 move the thread. Since we need to recompute the stack, we temporarily
1367 set EXECUTING to false. */
1368 executing = is_executing (tp->ptid);
1369 set_executing (tp->ptid, 0);
1370
1371 /* GDB stores the current frame_id when stepping in order to detect steps
1372 into subroutines.
1373 Since frames are computed differently when we're replaying, we need to
1374 recompute those stored frames and fix them up so we can still detect
1375 subroutines after we started replaying. */
1376 TRY_CATCH (except, RETURN_MASK_ALL)
1377 {
1378 struct frame_info *frame;
1379 struct frame_id frame_id;
1380 int upd_step_frame_id, upd_step_stack_frame_id;
1381
1382 /* The current frame without replaying - computed via normal unwind. */
1383 frame = get_current_frame ();
1384 frame_id = get_frame_id (frame);
1385
1386 /* Check if we need to update any stepping-related frame id's. */
1387 upd_step_frame_id = frame_id_eq (frame_id,
1388 tp->control.step_frame_id);
1389 upd_step_stack_frame_id = frame_id_eq (frame_id,
1390 tp->control.step_stack_frame_id);
1391
1392 /* We start replaying at the end of the branch trace. This corresponds
1393 to the current instruction. */
1394 replay = xmalloc (sizeof (*replay));
1395 btrace_insn_end (replay, btinfo);
1396
1397 /* We're not replaying, yet. */
1398 gdb_assert (btinfo->replay == NULL);
1399 btinfo->replay = replay;
1400
1401 /* Make sure we're not using any stale registers. */
1402 registers_changed_ptid (tp->ptid);
1403
1404 /* The current frame with replaying - computed via btrace unwind. */
1405 frame = get_current_frame ();
1406 frame_id = get_frame_id (frame);
1407
1408 /* Replace stepping related frames where necessary. */
1409 if (upd_step_frame_id)
1410 tp->control.step_frame_id = frame_id;
1411 if (upd_step_stack_frame_id)
1412 tp->control.step_stack_frame_id = frame_id;
1413 }
1414
1415 /* Restore the previous execution state. */
1416 set_executing (tp->ptid, executing);
1417
1418 if (except.reason < 0)
1419 {
1420 xfree (btinfo->replay);
1421 btinfo->replay = NULL;
1422
1423 registers_changed_ptid (tp->ptid);
1424
1425 throw_exception (except);
1426 }
1427
1428 return replay;
1429 }
1430
1431 /* Stop replaying a thread. */
1432
1433 static void
1434 record_btrace_stop_replaying (struct thread_info *tp)
1435 {
1436 struct btrace_thread_info *btinfo;
1437
1438 btinfo = &tp->btrace;
1439
1440 xfree (btinfo->replay);
1441 btinfo->replay = NULL;
1442
1443 /* Make sure we're not leaving any stale registers. */
1444 registers_changed_ptid (tp->ptid);
1445 }
1446
1447 /* The to_resume method of target record-btrace. */
1448
1449 static void
1450 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1451 enum gdb_signal signal)
1452 {
1453 struct thread_info *tp, *other;
1454 enum btrace_thread_flag flag;
1455
1456 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1457
1458 tp = record_btrace_find_resume_thread (ptid);
1459 if (tp == NULL)
1460 error (_("Cannot find thread to resume."));
1461
1462 /* Stop replaying other threads if the thread to resume is not replaying. */
1463 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1464 ALL_THREADS (other)
1465 record_btrace_stop_replaying (other);
1466
1467 /* As long as we're not replaying, just forward the request. */
1468 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1469 {
1470 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1471 if (ops->to_resume != NULL)
1472 return ops->to_resume (ops, ptid, step, signal);
1473
1474 error (_("Cannot find target for stepping."));
1475 }
1476
1477 /* Compute the btrace thread flag for the requested move. */
1478 if (step == 0)
1479 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1480 else
1481 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1482
1483 /* At the moment, we only move a single thread. We could also move
1484 all threads in parallel by single-stepping each resumed thread
1485 until the first runs into an event.
1486 When we do that, we would want to continue all other threads.
1487 For now, just resume one thread so as not to confuse to_wait. */
1488 record_btrace_resume_thread (tp, flag);
1489
1490 /* We just indicate the resume intent here. The actual stepping happens in
1491 record_btrace_wait below. */
1492 }
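
/* E.g. (illustrative) "reverse-stepi" arrives here with STEP == 1 and
   execution_direction == EXEC_REVERSE, yielding BTHR_RSTEP; a plain
   "continue" during replay yields BTHR_CONT.  */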
1493
1494 /* Find a thread to move. */
1495
1496 static struct thread_info *
1497 record_btrace_find_thread_to_move (ptid_t ptid)
1498 {
1499 struct thread_info *tp;
1500
1501 /* First check the parameter thread. */
1502 tp = find_thread_ptid (ptid);
1503 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1504 return tp;
1505
1506 /* Otherwise, find one other thread that has been resumed. */
1507 ALL_THREADS (tp)
1508 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1509 return tp;
1510
1511 return NULL;
1512 }
1513
1514 /* Return a target_waitstatus indicating that we ran out of history. */
1515
1516 static struct target_waitstatus
1517 btrace_step_no_history (void)
1518 {
1519 struct target_waitstatus status;
1520
1521 status.kind = TARGET_WAITKIND_NO_HISTORY;
1522
1523 return status;
1524 }
1525
1526 /* Return a target_waitstatus indicating that a step finished. */
1527
1528 static struct target_waitstatus
1529 btrace_step_stopped (void)
1530 {
1531 struct target_waitstatus status;
1532
1533 status.kind = TARGET_WAITKIND_STOPPED;
1534 status.value.sig = GDB_SIGNAL_TRAP;
1535
1536 return status;
1537 }
1538
1539 /* Clear the record histories. */
1540
1541 static void
1542 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1543 {
1544 xfree (btinfo->insn_history);
1545 xfree (btinfo->call_history);
1546
1547 btinfo->insn_history = NULL;
1548 btinfo->call_history = NULL;
1549 }
1550
1551 /* Step a single thread. */
1552
1553 static struct target_waitstatus
1554 record_btrace_step_thread (struct thread_info *tp)
1555 {
1556 struct btrace_insn_iterator *replay, end;
1557 struct btrace_thread_info *btinfo;
1558 struct address_space *aspace;
1559 struct inferior *inf;
1560 enum btrace_thread_flag flags;
1561 unsigned int steps;
1562
1563 btinfo = &tp->btrace;
1564 replay = btinfo->replay;
1565
1566 flags = btinfo->flags & BTHR_MOVE;
1567 btinfo->flags &= ~BTHR_MOVE;
1568
1569 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1570
1571 switch (flags)
1572 {
1573 default:
1574 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1575
1576 case BTHR_STEP:
1577 /* We're done if we're not replaying. */
1578 if (replay == NULL)
1579 return btrace_step_no_history ();
1580
1581 /* We are always able to step at least once. */
1582 steps = btrace_insn_next (replay, 1);
1583 gdb_assert (steps == 1);
1584
1585 /* Determine the end of the instruction trace. */
1586 btrace_insn_end (&end, btinfo);
1587
1588 /* We stop replaying if we reached the end of the trace. */
1589 if (btrace_insn_cmp (replay, &end) == 0)
1590 record_btrace_stop_replaying (tp);
1591
1592 return btrace_step_stopped ();
1593
1594 case BTHR_RSTEP:
1595 /* Start replaying if we're not already doing so. */
1596 if (replay == NULL)
1597 replay = record_btrace_start_replaying (tp);
1598
1599 /* If we can't step any further, we reached the end of the history. */
1600 steps = btrace_insn_prev (replay, 1);
1601 if (steps == 0)
1602 return btrace_step_no_history ();
1603
1604 return btrace_step_stopped ();
1605
1606 case BTHR_CONT:
1607 /* We're done if we're not replaying. */
1608 if (replay == NULL)
1609 return btrace_step_no_history ();
1610
1611 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1612 aspace = inf->aspace;
1613
1614 /* Determine the end of the instruction trace. */
1615 btrace_insn_end (&end, btinfo);
1616
1617 for (;;)
1618 {
1619 const struct btrace_insn *insn;
1620
1621 /* We are always able to step at least once. */
1622 steps = btrace_insn_next (replay, 1);
1623 gdb_assert (steps == 1);
1624
1625 /* We stop replaying if we reached the end of the trace. */
1626 if (btrace_insn_cmp (replay, &end) == 0)
1627 {
1628 record_btrace_stop_replaying (tp);
1629 return btrace_step_no_history ();
1630 }
1631
1632 insn = btrace_insn_get (replay);
1633 gdb_assert (insn);
1634
1635 DEBUG ("stepping %d (%s) ... %s", tp->num,
1636 target_pid_to_str (tp->ptid),
1637 core_addr_to_string_nz (insn->pc));
1638
1639 if (breakpoint_here_p (aspace, insn->pc))
1640 return btrace_step_stopped ();
1641 }
1642
1643 case BTHR_RCONT:
1644 /* Start replaying if we're not already doing so. */
1645 if (replay == NULL)
1646 replay = record_btrace_start_replaying (tp);
1647
1648 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1649 aspace = inf->aspace;
1650
1651 for (;;)
1652 {
1653 const struct btrace_insn *insn;
1654
1655 /* If we can't step any further, we're done. */
1656 steps = btrace_insn_prev (replay, 1);
1657 if (steps == 0)
1658 return btrace_step_no_history ();
1659
1660 insn = btrace_insn_get (replay);
1661 gdb_assert (insn);
1662
1663 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1664 target_pid_to_str (tp->ptid),
1665 core_addr_to_string_nz (insn->pc));
1666
1667 if (breakpoint_here_p (aspace, insn->pc))
1668 return btrace_step_stopped ();
1669 }
1670 }
1671 }
1672
1673 /* The to_wait method of target record-btrace. */
1674
1675 static ptid_t
1676 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1677 struct target_waitstatus *status, int options)
1678 {
1679 struct thread_info *tp, *other;
1680
1681 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1682
1683 /* As long as we're not replaying, just forward the request. */
1684 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1685 {
1686 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1687 if (ops->to_wait != NULL)
1688 return ops->to_wait (ops, ptid, status, options);
1689
1690 error (_("Cannot find target for waiting."));
1691 }
1692
1693 /* Let's find a thread to move. */
1694 tp = record_btrace_find_thread_to_move (ptid);
1695 if (tp == NULL)
1696 {
1697 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1698
1699 status->kind = TARGET_WAITKIND_IGNORE;
1700 return minus_one_ptid;
1701 }
1702
1703 /* We only move a single thread. We're not able to correlate threads. */
1704 *status = record_btrace_step_thread (tp);
1705
1706 /* Stop all other threads. */
1707 if (!non_stop)
1708 ALL_THREADS (other)
1709 other->btrace.flags &= ~BTHR_MOVE;
1710
1711 /* Start record histories anew from the current position. */
1712 record_btrace_clear_histories (&tp->btrace);
1713
1714 /* We moved the replay position but did not update registers. */
1715 registers_changed_ptid (tp->ptid);
1716
1717 return tp->ptid;
1718 }
1719
1720 /* The to_can_execute_reverse method of target record-btrace. */
1721
1722 static int
1723 record_btrace_can_execute_reverse (struct target_ops *self)
1724 {
1725 return 1;
1726 }
1727
1728 /* The to_decr_pc_after_break method of target record-btrace. */
1729
1730 static CORE_ADDR
1731 record_btrace_decr_pc_after_break (struct target_ops *ops,
1732 struct gdbarch *gdbarch)
1733 {
1734 /* When replaying, we do not actually execute the breakpoint instruction
1735 so there is no need to adjust the PC after hitting a breakpoint. */
1736 if (record_btrace_is_replaying (ops))
1737 return 0;
1738
1739 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
1740 }
1741
1742 /* The to_find_new_threads method of target record-btrace. */
1743
1744 static void
1745 record_btrace_find_new_threads (struct target_ops *ops)
1746 {
1747 /* Don't expect new threads if we're replaying. */
1748 if (record_btrace_is_replaying (ops))
1749 return;
1750
1751 /* Forward the request. */
1752 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1753 if (ops->to_find_new_threads != NULL)
1754 {
1755 ops->to_find_new_threads (ops);
1756 break;
1757 }
1758 }
1759
1760 /* The to_thread_alive method of target record-btrace. */
1761
1762 static int
1763 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1764 {
1765 /* We don't add or remove threads during replay. */
1766 if (record_btrace_is_replaying (ops))
1767 return find_thread_ptid (ptid) != NULL;
1768
1769 /* Forward the request. */
1770 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1771 if (ops->to_thread_alive != NULL)
1772 return ops->to_thread_alive (ops, ptid);
1773
1774 return 0;
1775 }
1776
1777 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1778 is stopped. */
1779
1780 static void
1781 record_btrace_set_replay (struct thread_info *tp,
1782 const struct btrace_insn_iterator *it)
1783 {
1784 struct btrace_thread_info *btinfo;
1785
1786 btinfo = &tp->btrace;
1787
1788 if (it == NULL || it->function == NULL)
1789 record_btrace_stop_replaying (tp);
1790 else
1791 {
1792 if (btinfo->replay == NULL)
1793 record_btrace_start_replaying (tp);
1794 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1795 return;
1796
1797 *btinfo->replay = *it;
1798 registers_changed_ptid (tp->ptid);
1799 }
1800
1801 /* Start anew from the new replay position. */
1802 record_btrace_clear_histories (btinfo);
1803 }
1804
1805 /* The to_goto_record_begin method of target record-btrace. */
1806
1807 static void
1808 record_btrace_goto_begin (struct target_ops *self)
1809 {
1810 struct thread_info *tp;
1811 struct btrace_insn_iterator begin;
1812
1813 tp = require_btrace_thread ();
1814
1815 btrace_insn_begin (&begin, &tp->btrace);
1816 record_btrace_set_replay (tp, &begin);
1817
1818 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1819 }
1820
1821 /* The to_goto_record_end method of target record-btrace. */
1822
1823 static void
1824 record_btrace_goto_end (struct target_ops *ops)
1825 {
1826 struct thread_info *tp;
1827
1828 tp = require_btrace_thread ();
1829
1830 record_btrace_set_replay (tp, NULL);
1831
1832 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1833 }
1834
1835 /* The to_goto_record method of target record-btrace. */
1836
1837 static void
1838 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1839 {
1840 struct thread_info *tp;
1841 struct btrace_insn_iterator it;
1842 unsigned int number;
1843 int found;
1844
1845 number = insn;
1846
1847 /* Check for wrap-arounds. */
1848 if (number != insn)
1849 error (_("Instruction number out of range."));
1850
1851 tp = require_btrace_thread ();
1852
1853 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1854 if (found == 0)
1855 error (_("No such instruction."));
1856
1857 record_btrace_set_replay (tp, &it);
1858
1859 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1860 }
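
/* E.g. (illustrative) "record goto 42" ends up here with INSN == 42 and
   moves the replay position to instruction 42.  */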
1861
1862 /* Initialize the record-btrace target ops. */
1863
1864 static void
1865 init_record_btrace_ops (void)
1866 {
1867 struct target_ops *ops;
1868
1869 ops = &record_btrace_ops;
1870 ops->to_shortname = "record-btrace";
1871 ops->to_longname = "Branch tracing target";
1872 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1873 ops->to_open = record_btrace_open;
1874 ops->to_close = record_btrace_close;
1875 ops->to_detach = record_detach;
1876 ops->to_disconnect = record_disconnect;
1877 ops->to_mourn_inferior = record_mourn_inferior;
1878 ops->to_kill = record_kill;
1879 ops->to_create_inferior = find_default_create_inferior;
1880 ops->to_stop_recording = record_btrace_stop_recording;
1881 ops->to_info_record = record_btrace_info;
1882 ops->to_insn_history = record_btrace_insn_history;
1883 ops->to_insn_history_from = record_btrace_insn_history_from;
1884 ops->to_insn_history_range = record_btrace_insn_history_range;
1885 ops->to_call_history = record_btrace_call_history;
1886 ops->to_call_history_from = record_btrace_call_history_from;
1887 ops->to_call_history_range = record_btrace_call_history_range;
1888 ops->to_record_is_replaying = record_btrace_is_replaying;
1889 ops->to_xfer_partial = record_btrace_xfer_partial;
1890 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1891 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1892 ops->to_fetch_registers = record_btrace_fetch_registers;
1893 ops->to_store_registers = record_btrace_store_registers;
1894 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1895 ops->to_get_unwinder = &record_btrace_frame_unwind;
1896 ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
1897 ops->to_resume = record_btrace_resume;
1898 ops->to_wait = record_btrace_wait;
1899 ops->to_find_new_threads = record_btrace_find_new_threads;
1900 ops->to_thread_alive = record_btrace_thread_alive;
1901 ops->to_goto_record_begin = record_btrace_goto_begin;
1902 ops->to_goto_record_end = record_btrace_goto_end;
1903 ops->to_goto_record = record_btrace_goto;
1904 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1905 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1906 ops->to_stratum = record_stratum;
1907 ops->to_magic = OPS_MAGIC;
1908 }
1909
1910 /* Alias for "target record". */
1911
1912 static void
1913 cmd_record_btrace_start (char *args, int from_tty)
1914 {
1915 if (args != NULL && *args != 0)
1916 error (_("Invalid argument."));
1917
1918 execute_command ("target record-btrace", from_tty);
1919 }
1920
1921 void _initialize_record_btrace (void);
1922
1923 /* Initialize btrace commands. */
1924
1925 void
1926 _initialize_record_btrace (void)
1927 {
1928 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1929 _("Start branch trace recording."),
1930 &record_cmdlist);
1931 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1932
1933 init_record_btrace_ops ();
1934 add_target (&record_btrace_ops);
1935
1936 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
1937 xcalloc, xfree);
1938 }