1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
28 #ifdef HAVE_SYS_SYSCALL_H
29 #include <sys/syscall.h>
32 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include "nat/gdb_ptrace.h"
37 #include <sys/types.h>
39 #include <sys/utsname.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
51 /* A perf_event branch trace sample. */
52 struct perf_event_sample
54 /* The perf_event sample header. */
55 struct perf_event_header header
;
57 /* The perf_event branch tracing payload. */
58 struct perf_event_bts bts
;
61 /* Identify the cpu we're running on. */
62 static struct btrace_cpu
63 btrace_this_cpu (void)
65 struct btrace_cpu cpu
;
66 unsigned int eax
, ebx
, ecx
, edx
;
69 memset (&cpu
, 0, sizeof (cpu
));
71 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
74 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
75 && edx
== signature_INTEL_edx
)
77 unsigned int cpuid
, ignore
;
79 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
82 cpu
.vendor
= CV_INTEL
;
84 cpu
.family
= (cpuid
>> 8) & 0xf;
85 cpu
.model
= (cpuid
>> 4) & 0xf;
87 if (cpu
.family
== 0x6)
88 cpu
.model
+= (cpuid
>> 12) & 0xf0;
96 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
99 perf_event_new_data (const struct perf_event_buffer
*pev
)
101 return *pev
->data_head
!= pev
->last_head
;
104 /* Try to determine the size of a pointer in bits for the OS.
106 This is the same as the size of a pointer for the inferior process
107 except when a 32-bit inferior is running on a 64-bit OS. */
109 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
110 to the memory holding the copy.
111 The caller is responsible for freeing the memory. */
114 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
117 const gdb_byte
*begin
, *end
, *start
, *stop
;
125 gdb_assert (size
<= data_head
);
126 data_tail
= data_head
- size
;
128 buffer_size
= pev
->size
;
130 start
= begin
+ data_tail
% buffer_size
;
131 stop
= begin
+ data_head
% buffer_size
;
133 buffer
= xmalloc (size
);
136 memcpy (buffer
, start
, stop
- start
);
139 end
= begin
+ buffer_size
;
141 memcpy (buffer
, start
, end
- start
);
142 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
148 /* Copy the perf event buffer data from PEV.
149 Store a pointer to the copy into DATA and its size in SIZE. */
152 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
158 data_head
= *pev
->data_head
;
161 if (data_head
< size
)
162 size
= (size_t) data_head
;
164 *data
= perf_event_read (pev
, data_head
, size
);
167 pev
->last_head
= data_head
;
/* Determine the event type for Intel(R) Processor Trace.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The dynamic perf_event type for intel_pt is published via sysfs.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the size of a pointer in bits for the OS.
   Returns 64 for a 64-bit x86 kernel, 0 when unknown.  */

static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;
  int errcode;

  memset (&utsn, 0, sizeof (utsn));

  errcode = uname (&utsn);
  if (errcode < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}
212 /* Check whether an address is in the kernel. */
215 perf_event_is_kernel_addr (const struct btrace_target_info
*tinfo
,
220 /* If we don't know the size of a pointer, we can't check. Let's assume it's
221 not a kernel address in this case. */
222 if (tinfo
->ptr_bits
== 0)
225 /* A bit mask for the most significant bit in an address. */
226 mask
= (uint64_t) 1 << (tinfo
->ptr_bits
- 1);
228 /* Check whether the most significant bit in the address is set. */
229 return (addr
& mask
) != 0;
232 /* Check whether a perf event record should be skipped. */
235 perf_event_skip_bts_record (const struct btrace_target_info
*tinfo
,
236 const struct perf_event_bts
*bts
)
238 /* The hardware may report branches from kernel into user space. Branches
239 from user into kernel space will be suppressed. We filter the former to
240 provide a consistent branch trace excluding kernel. */
241 return perf_event_is_kernel_addr (tinfo
, bts
->from
);
244 /* Perform a few consistency checks on a perf event sample record. This is
245 meant to catch cases when we get out of sync with the perf event stream. */
248 perf_event_sample_ok (const struct perf_event_sample
*sample
)
250 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
253 if (sample
->header
.size
!= sizeof (*sample
))
259 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
260 and to addresses (plus a header).
262 Start points into that buffer at the next sample position.
263 We read the collected samples backwards from start.
265 While reading the samples, we convert the information into a list of blocks.
266 For two adjacent samples s1 and s2, we form a block b such that b.begin =
267 s1.to and b.end = s2.from.
269 In case the buffer overflows during sampling, one sample may have its lower
270 part at the end and its upper part at the beginning of the buffer. */
272 static VEC (btrace_block_s
) *
273 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
274 const uint8_t *end
, const uint8_t *start
, size_t size
)
276 VEC (btrace_block_s
) *btrace
= NULL
;
277 struct perf_event_sample sample
;
279 struct btrace_block block
= { 0, 0 };
280 struct regcache
*regcache
;
282 gdb_assert (begin
<= start
);
283 gdb_assert (start
<= end
);
285 /* The first block ends at the current pc. */
286 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
287 block
.end
= regcache_read_pc (regcache
);
289 /* The buffer may contain a partial record as its last entry (i.e. when the
290 buffer size is not a multiple of the sample size). */
291 read
= sizeof (sample
) - 1;
293 for (; read
< size
; read
+= sizeof (sample
))
295 const struct perf_event_sample
*psample
;
297 /* Find the next perf_event sample in a backwards traversal. */
298 start
-= sizeof (sample
);
300 /* If we're still inside the buffer, we're done. */
302 psample
= (const struct perf_event_sample
*) start
;
307 /* We're to the left of the ring buffer, we will wrap around and
308 reappear at the very right of the ring buffer. */
310 missing
= (begin
- start
);
311 start
= (end
- missing
);
313 /* If the entire sample is missing, we're done. */
314 if (missing
== sizeof (sample
))
315 psample
= (const struct perf_event_sample
*) start
;
320 /* The sample wrapped around. The lower part is at the end and
321 the upper part is at the beginning of the buffer. */
322 stack
= (uint8_t *) &sample
;
324 /* Copy the two parts so we have a contiguous sample. */
325 memcpy (stack
, start
, missing
);
326 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
332 if (!perf_event_sample_ok (psample
))
334 warning (_("Branch trace may be incomplete."));
338 if (perf_event_skip_bts_record (tinfo
, &psample
->bts
))
341 /* We found a valid sample, so we can complete the current block. */
342 block
.begin
= psample
->bts
.to
;
344 VEC_safe_push (btrace_block_s
, btrace
, &block
);
346 /* Start the next block. */
347 block
.end
= psample
->bts
.from
;
350 /* Push the last block (i.e. the first one of inferior execution), as well.
351 We don't know where it ends, but we know where it starts. If we're
352 reading delta trace, we can fill in the start address later on.
353 Otherwise we will prune it. */
355 VEC_safe_push (btrace_block_s
, btrace
, &block
);
360 /* Check whether the kernel supports BTS. */
363 kernel_supports_bts (void)
365 struct perf_event_attr attr
;
374 warning (_("test bts: cannot fork: %s."), safe_strerror (errno
));
378 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
381 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
382 safe_strerror (errno
));
386 status
= raise (SIGTRAP
);
389 warning (_("test bts: cannot raise SIGTRAP: %s."),
390 safe_strerror (errno
));
397 pid
= waitpid (child
, &status
, 0);
400 warning (_("test bts: bad pid %ld, error: %s."),
401 (long) pid
, safe_strerror (errno
));
405 if (!WIFSTOPPED (status
))
407 warning (_("test bts: expected stop. status: %d."),
412 memset (&attr
, 0, sizeof (attr
));
414 attr
.type
= PERF_TYPE_HARDWARE
;
415 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
416 attr
.sample_period
= 1;
417 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
418 attr
.exclude_kernel
= 1;
420 attr
.exclude_idle
= 1;
422 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
426 kill (child
, SIGKILL
);
427 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
429 pid
= waitpid (child
, &status
, 0);
432 warning (_("test bts: bad pid %ld, error: %s."),
433 (long) pid
, safe_strerror (errno
));
434 if (!WIFSIGNALED (status
))
435 warning (_("test bts: expected killed. status: %d."),
443 /* Check whether the kernel supports Intel(R) Processor Trace. */
446 kernel_supports_pt (void)
448 struct perf_event_attr attr
;
450 int status
, file
, type
;
457 warning (_("test pt: cannot fork: %s."), safe_strerror (errno
));
461 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
464 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
465 safe_strerror (errno
));
469 status
= raise (SIGTRAP
);
472 warning (_("test pt: cannot raise SIGTRAP: %s."),
473 safe_strerror (errno
));
480 pid
= waitpid (child
, &status
, 0);
483 warning (_("test pt: bad pid %ld, error: %s."),
484 (long) pid
, safe_strerror (errno
));
488 if (!WIFSTOPPED (status
))
490 warning (_("test pt: expected stop. status: %d."),
495 status
= perf_event_pt_event_type (&type
);
500 memset (&attr
, 0, sizeof (attr
));
502 attr
.size
= sizeof (attr
);
504 attr
.exclude_kernel
= 1;
506 attr
.exclude_idle
= 1;
508 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
513 kill (child
, SIGKILL
);
514 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
516 pid
= waitpid (child
, &status
, 0);
519 warning (_("test pt: bad pid %ld, error: %s."),
520 (long) pid
, safe_strerror (errno
));
521 if (!WIFSIGNALED (status
))
522 warning (_("test pt: expected killed. status: %d."),
530 /* Check whether an Intel cpu supports BTS. */
533 intel_supports_bts (const struct btrace_cpu
*cpu
)
540 case 0x1a: /* Nehalem */
544 case 0x25: /* Westmere */
547 case 0x2a: /* Sandy Bridge */
549 case 0x3a: /* Ivy Bridge */
551 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
552 "from" information afer an EIST transition, T-states, C1E, or
553 Adaptive Thermal Throttling. */
561 /* Check whether the cpu supports BTS. */
564 cpu_supports_bts (void)
566 struct btrace_cpu cpu
;
568 cpu
= btrace_this_cpu ();
572 /* Don't know about others. Let's assume they do. */
576 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.
   The (expensive) kernel/cpu probes are run once and cached.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
	cached = -1;
      else if (!cpu_supports_bts ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel(R) Processor Trace.
   The (expensive) kernel probe is run once and cached.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
	cached = -1;
      else
	cached = 1;
    }

  return cached > 0;
}
618 /* See linux-btrace.h. */
621 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
625 case BTRACE_FORMAT_NONE
:
628 case BTRACE_FORMAT_BTS
:
629 return linux_supports_bts ();
631 case BTRACE_FORMAT_PT
:
632 return linux_supports_pt ();
635 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
638 /* Enable branch tracing in BTS format. */
640 static struct btrace_target_info
*
641 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
643 struct perf_event_mmap_page
*header
;
644 struct btrace_target_info
*tinfo
;
645 struct btrace_tinfo_bts
*bts
;
650 tinfo
= XCNEW (struct btrace_target_info
);
652 tinfo
->ptr_bits
= linux_determine_kernel_ptr_bits ();
654 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
655 bts
= &tinfo
->variant
.bts
;
657 bts
->attr
.size
= sizeof (bts
->attr
);
658 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
659 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
660 bts
->attr
.sample_period
= 1;
662 /* We sample from and to address. */
663 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
665 bts
->attr
.exclude_kernel
= 1;
666 bts
->attr
.exclude_hv
= 1;
667 bts
->attr
.exclude_idle
= 1;
669 pid
= ptid_get_lwp (ptid
);
671 pid
= ptid_get_pid (ptid
);
674 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
678 /* Convert the requested size in bytes to pages (rounding up). */
679 pages
= ((size_t) conf
->size
/ PAGE_SIZE
680 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
681 /* We need at least one page. */
685 /* The buffer size can be requested in powers of two pages. Adjust PAGES
686 to the next power of two. */
687 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
688 if ((pages
& ((size_t) 1 << pg
)) != 0)
689 pages
+= ((size_t) 1 << pg
);
691 /* We try to allocate the requested size.
692 If that fails, try to get as much as we can. */
693 for (; pages
> 0; pages
>>= 1)
698 data_size
= (__u64
) pages
* PAGE_SIZE
;
700 /* Don't ask for more than we can represent in the configuration. */
701 if ((__u64
) UINT_MAX
< data_size
)
704 size
= (size_t) data_size
;
705 length
= size
+ PAGE_SIZE
;
707 /* Check for overflows. */
708 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
711 /* The number of pages we request needs to be a power of two. */
712 header
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0);
713 if (header
!= MAP_FAILED
)
720 data_offset
= PAGE_SIZE
;
722 #if defined (PERF_ATTR_SIZE_VER5)
723 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
727 data_offset
= header
->data_offset
;
728 data_size
= header
->data_size
;
730 size
= (unsigned int) data_size
;
732 /* Check for overflows. */
733 if ((__u64
) size
!= data_size
)
735 munmap ((void *) header
, size
+ PAGE_SIZE
);
739 #endif /* defined (PERF_ATTR_SIZE_VER5) */
741 bts
->header
= header
;
742 bts
->bts
.mem
= ((const uint8_t *) header
) + data_offset
;
743 bts
->bts
.size
= size
;
744 bts
->bts
.data_head
= &header
->data_head
;
745 bts
->bts
.last_head
= 0ull;
747 tinfo
->conf
.bts
.size
= (unsigned int) size
;
751 /* We were not able to allocate any buffer. */
759 #if defined (PERF_ATTR_SIZE_VER5)
761 /* Enable branch tracing in Intel(R) Processor Trace format. */
763 static struct btrace_target_info
*
764 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
766 struct perf_event_mmap_page
*header
;
767 struct btrace_target_info
*tinfo
;
768 struct btrace_tinfo_pt
*pt
;
770 int pid
, pg
, errcode
, type
;
775 errcode
= perf_event_pt_event_type (&type
);
779 pid
= ptid_get_lwp (ptid
);
781 pid
= ptid_get_pid (ptid
);
783 tinfo
= XCNEW (struct btrace_target_info
);
787 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
788 pt
= &tinfo
->variant
.pt
;
790 pt
->attr
.size
= sizeof (pt
->attr
);
791 pt
->attr
.type
= type
;
793 pt
->attr
.exclude_kernel
= 1;
794 pt
->attr
.exclude_hv
= 1;
795 pt
->attr
.exclude_idle
= 1;
798 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
802 /* Allocate the configuration page. */
803 header
= mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
805 if (header
== MAP_FAILED
)
808 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
810 /* Convert the requested size in bytes to pages (rounding up). */
811 pages
= ((size_t) conf
->size
/ PAGE_SIZE
812 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
813 /* We need at least one page. */
817 /* The buffer size can be requested in powers of two pages. Adjust PAGES
818 to the next power of two. */
819 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
820 if ((pages
& ((size_t) 1 << pg
)) != 0)
821 pages
+= ((size_t) 1 << pg
);
823 /* We try to allocate the requested size.
824 If that fails, try to get as much as we can. */
825 for (; pages
> 0; pages
>>= 1)
830 data_size
= (__u64
) pages
* PAGE_SIZE
;
832 /* Don't ask for more than we can represent in the configuration. */
833 if ((__u64
) UINT_MAX
< data_size
)
836 size
= (size_t) data_size
;
838 /* Check for overflows. */
839 if ((__u64
) size
!= data_size
)
842 header
->aux_size
= data_size
;
845 pt
->pt
.mem
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
847 if (pt
->pt
.mem
!= MAP_FAILED
)
856 pt
->pt
.data_head
= &header
->aux_head
;
858 tinfo
->conf
.pt
.size
= (unsigned int) size
;
862 munmap((void *) header
, PAGE_SIZE
);
872 #else /* !defined (PERF_ATTR_SIZE_VER5) */
874 static struct btrace_target_info
*
875 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
881 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
883 /* See linux-btrace.h. */
885 struct btrace_target_info
*
886 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
888 struct btrace_target_info
*tinfo
;
891 switch (conf
->format
)
893 case BTRACE_FORMAT_NONE
:
896 case BTRACE_FORMAT_BTS
:
897 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
900 case BTRACE_FORMAT_PT
:
901 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
908 /* Disable BTS tracing. */
910 static enum btrace_error
911 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
913 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
916 return BTRACE_ERR_NONE
;
919 /* Disable Intel(R) Processor Trace tracing. */
921 static enum btrace_error
922 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
924 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
925 munmap((void *) tinfo
->header
, PAGE_SIZE
);
928 return BTRACE_ERR_NONE
;
931 /* See linux-btrace.h. */
934 linux_disable_btrace (struct btrace_target_info
*tinfo
)
936 enum btrace_error errcode
;
938 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
939 switch (tinfo
->conf
.format
)
941 case BTRACE_FORMAT_NONE
:
944 case BTRACE_FORMAT_BTS
:
945 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
948 case BTRACE_FORMAT_PT
:
949 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
953 if (errcode
== BTRACE_ERR_NONE
)
959 /* Read branch trace data in BTS format for the thread given by TINFO into
960 BTRACE using the TYPE reading method. */
962 static enum btrace_error
963 linux_read_bts (struct btrace_data_bts
*btrace
,
964 struct btrace_target_info
*tinfo
,
965 enum btrace_read_type type
)
967 struct perf_event_buffer
*pevent
;
968 const uint8_t *begin
, *end
, *start
;
969 size_t buffer_size
, size
;
970 __u64 data_head
, data_tail
;
971 unsigned int retries
= 5;
973 pevent
= &tinfo
->variant
.bts
.bts
;
975 /* For delta reads, we return at least the partial last block containing
977 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
978 return BTRACE_ERR_NONE
;
980 buffer_size
= pevent
->size
;
981 data_tail
= pevent
->last_head
;
983 /* We may need to retry reading the trace. See below. */
986 data_head
= *pevent
->data_head
;
988 /* Delete any leftover trace from the previous iteration. */
989 VEC_free (btrace_block_s
, btrace
->blocks
);
991 if (type
== BTRACE_READ_DELTA
)
995 /* Determine the number of bytes to read and check for buffer
998 /* Check for data head overflows. We might be able to recover from
999 those but they are very unlikely and it's not really worth the
1001 if (data_head
< data_tail
)
1002 return BTRACE_ERR_OVERFLOW
;
1004 /* If the buffer is smaller than the trace delta, we overflowed. */
1005 data_size
= data_head
- data_tail
;
1006 if (buffer_size
< data_size
)
1007 return BTRACE_ERR_OVERFLOW
;
1009 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1010 size
= (size_t) data_size
;
1014 /* Read the entire buffer. */
1017 /* Adjust the size if the buffer has not overflowed, yet. */
1018 if (data_head
< size
)
1019 size
= (size_t) data_head
;
1022 /* Data_head keeps growing; the buffer itself is circular. */
1023 begin
= pevent
->mem
;
1024 start
= begin
+ data_head
% buffer_size
;
1026 if (data_head
<= buffer_size
)
1029 end
= begin
+ pevent
->size
;
1031 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
1033 /* The stopping thread notifies its ptracer before it is scheduled out.
1034 On multi-core systems, the debugger might therefore run while the
1035 kernel might be writing the last branch trace records.
1037 Let's check whether the data head moved while we read the trace. */
1038 if (data_head
== *pevent
->data_head
)
1042 pevent
->last_head
= data_head
;
1044 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1045 if we're not doing a delta read. There is no way of filling in its zeroed
1047 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1048 && type
!= BTRACE_READ_DELTA
)
1049 VEC_pop (btrace_block_s
, btrace
->blocks
);
1051 return BTRACE_ERR_NONE
;
1054 /* Fill in the Intel(R) Processor Trace configuration information. */
1057 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1059 conf
->cpu
= btrace_this_cpu ();
1062 /* Read branch trace data in Intel(R) Processor Trace format for the thread
1063 given by TINFO into BTRACE using the TYPE reading method. */
1065 static enum btrace_error
1066 linux_read_pt (struct btrace_data_pt
*btrace
,
1067 struct btrace_target_info
*tinfo
,
1068 enum btrace_read_type type
)
1070 struct perf_event_buffer
*pt
;
1072 pt
= &tinfo
->variant
.pt
.pt
;
1074 linux_fill_btrace_pt_config (&btrace
->config
);
1078 case BTRACE_READ_DELTA
:
1079 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1080 around to stay inside the aux buffer. */
1081 return BTRACE_ERR_NOT_SUPPORTED
;
1083 case BTRACE_READ_NEW
:
1084 if (!perf_event_new_data (pt
))
1085 return BTRACE_ERR_NONE
;
1088 case BTRACE_READ_ALL
:
1089 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1090 return BTRACE_ERR_NONE
;
1093 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1096 /* See linux-btrace.h. */
1099 linux_read_btrace (struct btrace_data
*btrace
,
1100 struct btrace_target_info
*tinfo
,
1101 enum btrace_read_type type
)
1103 switch (tinfo
->conf
.format
)
1105 case BTRACE_FORMAT_NONE
:
1106 return BTRACE_ERR_NOT_SUPPORTED
;
1108 case BTRACE_FORMAT_BTS
:
1109 /* We read btrace in BTS format. */
1110 btrace
->format
= BTRACE_FORMAT_BTS
;
1111 btrace
->variant
.bts
.blocks
= NULL
;
1113 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1115 case BTRACE_FORMAT_PT
:
1116 /* We read btrace in Intel(R) Processor Trace format. */
1117 btrace
->format
= BTRACE_FORMAT_PT
;
1118 btrace
->variant
.pt
.data
= NULL
;
1119 btrace
->variant
.pt
.size
= 0;
1121 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1124 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1127 /* See linux-btrace.h. */
1129 const struct btrace_config
*
1130 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1132 return &tinfo
->conf
;
1135 #else /* !HAVE_LINUX_PERF_EVENT_H */
1137 /* See linux-btrace.h. */
1140 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1145 /* See linux-btrace.h. */
1147 struct btrace_target_info
*
1148 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1153 /* See linux-btrace.h. */
1156 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1158 return BTRACE_ERR_NOT_SUPPORTED
;
1161 /* See linux-btrace.h. */
1164 linux_read_btrace (struct btrace_data
*btrace
,
1165 struct btrace_target_info
*tinfo
,
1166 enum btrace_read_type type
)
1168 return BTRACE_ERR_NOT_SUPPORTED
;
1171 /* See linux-btrace.h. */
1173 const struct btrace_config
*
1174 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1179 #endif /* !HAVE_LINUX_PERF_EVENT_H */