+2015-07-02 Markus Metzger <markus.t.metzger@intel.com>
+
+ * NEWS: Announce new commands "record btrace pt" and "record pt".
+ Announce new options "set|show record btrace pt buffer-size".
+ * btrace.c: Include "rsp-low.h".
+ Include "inttypes.h".
+ (btrace_add_pc): Add forward declaration.
+ (pt_reclassify_insn, ftrace_add_pt, btrace_pt_readmem_callback)
+ (pt_translate_cpu_vendor, btrace_finalize_ftrace_pt)
+ (btrace_compute_ftrace_pt): New.
+ (btrace_compute_ftrace): Support BTRACE_FORMAT_PT.
+ (check_xml_btrace_version): Update version check.
+ (parse_xml_raw, parse_xml_btrace_pt_config_cpu)
+ (parse_xml_btrace_pt_raw, parse_xml_btrace_pt)
+ (btrace_pt_config_cpu_attributes, btrace_pt_config_children)
+ (btrace_pt_children): New.
+ (btrace_children): Add support for "pt".
+ (parse_xml_btrace_conf_pt, btrace_conf_pt_attributes): New.
+ (btrace_conf_children): Add support for "pt".
+ * btrace.h: Include "intel-pt.h".
+ (btrace_pt_error): New.
+ * common/btrace-common.c (btrace_format_string, btrace_data_fini)
+ (btrace_data_empty): Support BTRACE_FORMAT_PT.
+ * common/btrace-common.h (btrace_format): Add BTRACE_FORMAT_PT.
+ (struct btrace_config_pt): New.
+ (struct btrace_config)<pt>: New.
+ (struct btrace_data_pt_config, struct btrace_data_pt): New.
+ (struct btrace_data)<pt>: New.
+ * features/btrace-conf.dtd (btrace-conf)<pt>: New.
+ (pt): New.
+ * features/btrace.dtd (btrace)<pt>: New.
+ (pt, pt-config, cpu): New.
+ * nat/linux-btrace.c (perf_event_read, perf_event_read_all)
+ (perf_event_pt_event_type, kernel_supports_pt)
+ (linux_supports_pt): New.
+ (linux_supports_btrace): Support BTRACE_FORMAT_PT.
+ (linux_enable_bts): Free tinfo on error.
+ (linux_enable_pt): New.
+ (linux_enable_btrace): Support BTRACE_FORMAT_PT.
+ (linux_disable_pt): New.
+ (linux_disable_btrace): Support BTRACE_FORMAT_PT.
+ (linux_fill_btrace_pt_config, linux_read_pt): New.
+ (linux_read_btrace): Support BTRACE_FORMAT_PT.
+ * nat/linux-btrace.h (struct btrace_tinfo_pt): New.
+ (struct btrace_target_info)<pt>: New.
+ * record-btrace.c (set_record_btrace_pt_cmdlist)
+ (show_record_btrace_pt_cmdlist): New.
+ (record_btrace_print_pt_conf): New.
+ (record_btrace_print_conf): Support BTRACE_FORMAT_PT.
+ (btrace_ui_out_decode_error): Support BTRACE_FORMAT_PT.
+ (cmd_record_btrace_pt_start): New.
+ (cmd_record_btrace_start): Support BTRACE_FORMAT_PT.
+ (cmd_set_record_btrace_pt, cmd_show_record_btrace_pt): New.
+ (_initialize_record_btrace): Add new commands.
+ * remote.c (PACKET_Qbtrace_pt, PACKET_Qbtrace_conf_pt_size): New.
+ (remote_protocol_features): Add "Qbtrace:pt".
+ Add "Qbtrace-conf:pt:size".
+ (remote_supports_btrace): Support BTRACE_FORMAT_PT.
+ (btrace_sync_conf): Support PACKET_Qbtrace_conf_pt_size.
+ (remote_enable_btrace): Support BTRACE_FORMAT_PT.
+ (_initialize_remote): Add new commands.
+
2015-07-02 Markus Metzger <markus.t.metzger@intel.com>
* configure.ac: check for libipt
set mpx bound on i386 and amd64
Support for bound table investigation on Intel(R) MPX enabled applications.
+record btrace pt
+record pt
+ Start branch trace recording using Intel(R) Processor Trace format.
+
* New options
set debug dwarf-die
show debug linux-namespaces
Control display of debugging info regarding Linux namespaces.
+set|show record btrace pt buffer-size
+ Set and show the size of the ring buffer used for branch tracing in
+ Intel(R) Processor Trace format.
+ The obtained size may differ from the requested size. Use "info
+ record" to see the obtained buffer size.
+
* The command 'thread apply all' can now support new option '-ascending'
to call its specified command for all threads in ascending order.
Qbtrace-conf:bts:size
Set the requested ring buffer size for branch tracing in BTS format.
+Qbtrace:pt
+ Enable Intel(R) Processor Trace-based branch tracing for the current
+ process. The remote stub reports support for this packet to GDB's
+ qSupported query.
+
+Qbtrace-conf:pt:size
+ Set the requested ring buffer size for branch tracing in Intel(R) Processor
+ Trace format.
+
swbreak stop reason
Indicates a memory breakpoint instruction was executed, irrespective
of whether it was GDB that planted the breakpoint or the breakpoint
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
+#include "rsp-low.h"
+
+#include <inttypes.h>
+
+static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message. Use do ... while (0) to avoid ambiguities
when used in if statements. */
btinfo->level = -level;
}
+#if defined (HAVE_LIBIPT)
+
+static enum btrace_insn_class
+pt_reclassify_insn (enum pt_insn_class iclass)
+{
+ switch (iclass)
+ {
+ case ptic_call:
+ return BTRACE_INSN_CALL;
+
+ case ptic_return:
+ return BTRACE_INSN_RETURN;
+
+ case ptic_jump:
+ return BTRACE_INSN_JUMP;
+
+ default:
+ return BTRACE_INSN_OTHER;
+ }
+}
+
+/* Add function branch trace using DECODER. */
+
+static void
+ftrace_add_pt (struct pt_insn_decoder *decoder,
+ struct btrace_function **pbegin,
+ struct btrace_function **pend, int *plevel,
+ unsigned int *ngaps)
+{
+ struct btrace_function *begin, *end, *upd;
+ uint64_t offset;
+ int errcode, nerrors;
+
+ begin = *pbegin;
+ end = *pend;
+ nerrors = 0;
+ for (;;)
+ {
+ struct btrace_insn btinsn;
+ struct pt_insn insn;
+
+ errcode = pt_insn_sync_forward (decoder);
+ if (errcode < 0)
+ {
+ if (errcode != -pte_eos)
+ warning (_("Failed to synchronize onto the Intel(R) Processor "
+ "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
+ break;
+ }
+
+ memset (&btinsn, 0, sizeof (btinsn));
+ for (;;)
+ {
+ errcode = pt_insn_next (decoder, &insn, sizeof (insn));
+ if (errcode < 0)
+ break;
+
+ /* Look for gaps in the trace - unless we're at the beginning. */
+ if (begin != NULL)
+ {
+ /* Tracing is disabled and re-enabled each time we enter the
+ kernel. Most times, we continue from the same instruction we
+ stopped before. This is indicated via the RESUMED instruction
+ flag. The ENABLED instruction flag means that we continued
+ from some other instruction. Indicate this as a trace gap. */
+ if (insn.enabled)
+ *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+ }
+
+ upd = ftrace_update_function (end, insn.ip);
+ if (upd != end)
+ {
+ *pend = end = upd;
+
+ if (begin == NULL)
+ *pbegin = begin = upd;
+ }
+
+ /* Maintain the function level offset. */
+ *plevel = min (*plevel, end->level);
+
+ btinsn.pc = (CORE_ADDR) insn.ip;
+ btinsn.size = (gdb_byte) insn.size;
+ btinsn.iclass = pt_reclassify_insn (insn.iclass);
+
+ ftrace_update_insns (end, &btinsn);
+ }
+
+ if (errcode == -pte_eos)
+ break;
+
+ /* If the gap is at the very beginning, we ignore it - we will have
+ less trace, but we won't have any holes in the trace. */
+ if (begin == NULL)
+ continue;
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Failed to decode Intel(R) Processor Trace near trace "
+ "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
+ offset, insn.ip, pt_errstr (pt_errcode (errcode)));
+
+ /* Indicate the gap in the trace. */
+ *pend = end = ftrace_new_gap (end, errcode);
+ *ngaps += 1;
+ nerrors += 1;
+ }
+
+ if (nerrors > 0)
+ warning (_("The recorded execution trace may have gaps."));
+}
+
+/* A callback function to allow the trace decoder to read the inferior's
+ memory. */
+
+static int
+btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
+ const struct pt_asid *asid, CORE_ADDR pc,
+ void *context)
+{
+ int errcode;
+
+ TRY
+ {
+ errcode = target_read_code (pc, buffer, size);
+ if (errcode != 0)
+ return -pte_nomap;
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ return -pte_nomap;
+ }
+ END_CATCH
+
+ return size;
+}
+
+/* Translate the vendor from one enum to another. */
+
+static enum pt_cpu_vendor
+pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
+{
+ switch (vendor)
+ {
+ default:
+ return pcv_unknown;
+
+ case CV_INTEL:
+ return pcv_intel;
+ }
+}
+
+/* Finalize the function branch trace after decode. */
+
+static void
+btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
+ struct thread_info *tp, int level)
+{
+ pt_insn_free_decoder (decoder);
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ tp->btrace.level = -level;
+
+ /* Add a single last instruction entry for the current PC.
+ This allows us to compute the backtrace at the current PC using both
+ standard unwind and btrace unwind.
+ This extra entry is ignored by all record commands. */
+ btrace_add_pc (tp);
+}
+
+/* Compute the function branch trace from Intel(R) Processor Trace. */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace)
+{
+ struct btrace_thread_info *btinfo;
+ struct pt_insn_decoder *decoder;
+ struct pt_config config;
+ int level, errcode;
+
+ if (btrace->size == 0)
+ return;
+
+ btinfo = &tp->btrace;
+ level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
+
+ pt_config_init (&config);
+ config.begin = btrace->data;
+ config.end = btrace->data + btrace->size;
+
+ config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
+ config.cpu.family = btrace->config.cpu.family;
+ config.cpu.model = btrace->config.cpu.model;
+ config.cpu.stepping = btrace->config.cpu.stepping;
+
+ errcode = pt_cpu_errata (&config.errata, &config.cpu);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
+ pt_errstr (pt_errcode (errcode)));
+
+ decoder = pt_insn_alloc_decoder (&config);
+ if (decoder == NULL)
+ error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
+
+ TRY
+ {
+ struct pt_image *image;
+
+ image = pt_insn_get_image (decoder);
+ if (image == NULL)
+ error (_("Failed to configure the Intel(R) Processor Trace decoder."));
+
+ errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel(R) Processor Trace decoder: "
+ "%s."), pt_errstr (pt_errcode (errcode)));
+
+ ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
+ &btinfo->ngaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ /* Indicate a gap in the trace if we quit trace processing. */
+ if (error.reason == RETURN_QUIT && btinfo->end != NULL)
+ {
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+ btinfo->ngaps++;
+ }
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+}
+
+#else /* defined (HAVE_LIBIPT) */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace)
+{
+ internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
+}
+
+#endif /* defined (HAVE_LIBIPT) */
+
/* Compute the function branch trace from a block branch trace BTRACE for
a thread given by BTINFO. */
case BTRACE_FORMAT_BTS:
btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
return;
+
+ case BTRACE_FORMAT_PT:
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
+ return;
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
case BTRACE_FORMAT_BTS:
return btrace_stitch_bts (&btrace->variant.bts, tp);
+
+ case BTRACE_FORMAT_PT:
+ /* Delta reads are not supported. */
+ return -1;
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
block->end = *end;
}
+/* Parse a "raw" xml record. */
+
+static void
+parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
+ gdb_byte **pdata, unsigned long *psize)
+{
+ struct cleanup *cleanup;
+ gdb_byte *data, *bin;
+ unsigned long size;
+ size_t len;
+
+ len = strlen (body_text);
+ size = len / 2;
+
+ if ((size_t) size * 2 != len)
+ gdb_xml_error (parser, _("Bad raw data size."));
+
+ bin = data = xmalloc (size);
+ cleanup = make_cleanup (xfree, data);
+
+ /* We use hex encoding - see common/rsp-low.h. */
+ while (len > 0)
+ {
+ char hi, lo;
+
+ hi = *body_text++;
+ lo = *body_text++;
+
+ if (hi == 0 || lo == 0)
+ gdb_xml_error (parser, _("Bad hex encoding."));
+
+ *bin++ = fromhex (hi) * 16 + fromhex (lo);
+ len -= 2;
+ }
+
+ discard_cleanups (cleanup);
+
+ *pdata = data;
+ *psize = size;
+}
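
To make the hex convention noted above concrete (the stub side produces it in
linux_low_encode_raw later in this patch), here is a small self-contained
sketch of the round trip; hex_digit and hex_value are simplified illustrative
stand-ins for the tohex/fromhex helpers declared in common/rsp-low.h.

#include <stdio.h>

/* Simplified stand-ins for tohex/fromhex from common/rsp-low.h.  */

static char
hex_digit (int nib)
{
  return "0123456789abcdef"[nib & 0xf];
}

static int
hex_value (char c)
{
  if (c >= '0' && c <= '9')
    return c - '0';
  return (c | 0x20) - 'a' + 10;
}

int
main (void)
{
  const unsigned char raw[] = { 0x1f, 0x00, 0xa5, 0x3c };
  char text[2 * sizeof (raw) + 1];
  unsigned char decoded[sizeof (raw)];
  size_t i;

  /* Encode each byte as two hex digits, high nibble first, as the stub
     does when filling the <raw> element.  */
  for (i = 0; i < sizeof (raw); i++)
    {
      text[2 * i] = hex_digit (raw[i] >> 4);
      text[2 * i + 1] = hex_digit (raw[i] & 0xf);
    }
  text[2 * i] = '\0';

  /* Decode digit pairs back into bytes, as parse_xml_raw does above.  */
  for (i = 0; i < sizeof (decoded); i++)
    decoded[i] = hex_value (text[2 * i]) * 16 + hex_value (text[2 * i + 1]);

  /* Prints "1f00a53c decodes back to 1f 00 a5 3c".  */
  printf ("%s decodes back to %02x %02x %02x %02x\n", text,
          decoded[0], decoded[1], decoded[2], decoded[3]);
  return 0;
}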
+
+/* Parse a btrace pt-config "cpu" xml record. */
+
+static void
+parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+ const char *vendor;
+ ULONGEST *family, *model, *stepping;
+
+ vendor = xml_find_attribute (attributes, "vendor")->value;
+ family = xml_find_attribute (attributes, "family")->value;
+ model = xml_find_attribute (attributes, "model")->value;
+ stepping = xml_find_attribute (attributes, "stepping")->value;
+
+ btrace = user_data;
+
+ if (strcmp (vendor, "GenuineIntel") == 0)
+ btrace->variant.pt.config.cpu.vendor = CV_INTEL;
+
+ btrace->variant.pt.config.cpu.family = *family;
+ btrace->variant.pt.config.cpu.model = *model;
+ btrace->variant.pt.config.cpu.stepping = *stepping;
+}
+
+/* Parse a btrace pt "raw" xml record. */
+
+static void
+parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, const char *body_text)
+{
+ struct btrace_data *btrace;
+
+ btrace = user_data;
+ parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
+ &btrace->variant.pt.size);
+}
+
+/* Parse a btrace "pt" xml record. */
+
+static void
+parse_xml_btrace_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+
+ btrace = user_data;
+ btrace->format = BTRACE_FORMAT_PT;
+ btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
+ btrace->variant.pt.data = NULL;
+ btrace->variant.pt.size = 0;
+}
+
static const struct gdb_xml_attribute block_attributes[] = {
{ "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
};
+static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
+ { "vendor", GDB_XML_AF_NONE, NULL, NULL },
+ { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_config_children[] = {
+ { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_pt_config_cpu, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_children[] = {
+ { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
+ NULL },
+ { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
static const struct gdb_xml_attribute btrace_attributes[] = {
{ "version", GDB_XML_AF_NONE, NULL, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
static const struct gdb_xml_element btrace_children[] = {
{ "block", block_attributes, NULL,
GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
+ { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
+ NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
size = xml_find_attribute (attributes, "size");
if (size != NULL)
- conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
+ conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}
+/* Parse a btrace-conf "pt" xml record. */
+
+static void
+parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = user_data;
+ conf->format = BTRACE_FORMAT_PT;
+ conf->pt.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
+}
+
+static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
{ "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
static const struct gdb_xml_element btrace_conf_children[] = {
{ "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
parse_xml_btrace_conf_bts, NULL },
+ { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_pt, NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
#include "btrace-common.h"
#include "target/waitstatus.h" /* For enum target_stop_reason. */
+#if defined (HAVE_LIBIPT)
+# include <intel-pt.h>
+#endif
+
struct thread_info;
struct btrace_function;
BDE_BTS_INSN_SIZE
};
+/* Decode errors for the Intel(R) Processor Trace recording format. */
+enum btrace_pt_error
+{
+ /* The user cancelled trace processing. */
+ BDE_PT_USER_QUIT = 1,
+
+ /* Tracing was temporarily disabled. */
+ BDE_PT_DISABLED,
+
+ /* Trace recording overflowed. */
+ BDE_PT_OVERFLOW
+
+ /* Negative numbers are used by the decoder library. */
+};
+
/* A branch trace function segment.
This represents a function segment in a branch trace, i.e. a consecutive
case BTRACE_FORMAT_BTS:
return _("Branch Trace Store");
+
+ case BTRACE_FORMAT_PT:
+ return _("Intel(R) Processor Trace");
}
internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
case BTRACE_FORMAT_BTS:
VEC_free (btrace_block_s, data->variant.bts.blocks);
return;
+
+ case BTRACE_FORMAT_PT:
+ xfree (data->variant.pt.data);
+ return;
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
case BTRACE_FORMAT_BTS:
return VEC_empty (btrace_block_s, data->variant.bts.blocks);
+
+ case BTRACE_FORMAT_PT:
+ return (data->variant.pt.size == 0);
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* Branch trace is in Branch Trace Store (BTS) format.
Actually, the format is a sequence of blocks derived from BTS. */
- BTRACE_FORMAT_BTS
+ BTRACE_FORMAT_BTS,
+
+ /* Branch trace is in Intel(R) Processor Trace format. */
+ BTRACE_FORMAT_PT
};
/* An enumeration of cpu vendors. */
unsigned int size;
};
+/* An Intel(R) Processor Trace configuration. */
+
+struct btrace_config_pt
+{
+ /* The size of the branch trace buffer in bytes. */
+ unsigned int size;
+};
+
/* A branch tracing configuration.
This describes the requested configuration as well as the actually
/* The BTS format configuration. */
struct btrace_config_bts bts;
+
+ /* The Intel(R) Processor Trace format configuration. */
+ struct btrace_config_pt pt;
};
/* Branch trace in BTS format. */
VEC (btrace_block_s) *blocks;
};
+/* Configuration information to go with the trace data. */
+struct btrace_data_pt_config
+{
+ /* The processor on which the trace has been collected. */
+ struct btrace_cpu cpu;
+};
+
+/* Branch trace in Intel(R) Processor Trace format. */
+struct btrace_data_pt
+{
+ /* Some configuration information to go with the data. */
+ struct btrace_data_pt_config config;
+
+ /* The trace data. */
+ gdb_byte *data;
+
+ /* The size of DATA in bytes. */
+ unsigned long size;
+};
+
/* The branch trace data. */
struct btrace_data
{
{
/* Format == BTRACE_FORMAT_BTS. */
struct btrace_data_bts bts;
+
+ /* Format == BTRACE_FORMAT_PT. */
+ struct btrace_data_pt pt;
} variant;
};
+2015-07-02 Markus Metzger <markus.t.metzger@intel.com>
+
+ * gdb.texinfo (Process Record and Replay): Spell out that variables
+ and registers are not available during btrace replay.
+ Describe the new "record btrace pt" command.
+ Describe the new "set|show record btrace pt buffer-size" options.
+ (General Query Packets): Describe the new Qbtrace:pt and
+ Qbtrace-conf:pt:size packets.
+ Expand "bts" to "Branch Trace Store".
+ Update the branch trace DTD.
+
2015-06-26 Patrick Palka <patrick@parcs.ath.cx>
* gdb.texinfo (Command History): Document the new option
@kindex record full
@kindex record btrace
@kindex record btrace bts
+@kindex record btrace pt
@kindex record bts
+@kindex record pt
@kindex rec
@kindex rec full
@kindex rec btrace
@kindex rec btrace bts
+@kindex rec btrace pt
@kindex rec bts
+@kindex rec pt
@item record @var{method}
This command starts the process record and replay target. The
recording method can be specified as parameter. Without a parameter
@item btrace @var{format}
Hardware-supported instruction recording. This method does not record
data. Further, the data is collected in a ring buffer so old data will
-be overwritten when the buffer is full. It allows limited replay and
-reverse execution.
+be overwritten when the buffer is full. It allows limited reverse
+execution. Variables and registers are not available during reverse
+execution.
The recording format can be specified as parameter. Without a parameter
the command chooses the recording format. The following recording
Use the @dfn{Branch Trace Store} (@acronym{BTS}) recording format. In
this format, the processor stores a from/to record for each executed
branch in the btrace ring buffer.
+
+@item pt
+@cindex Intel(R) Processor Trace
+Use the @dfn{Intel(R) Processor Trace} recording format. In this
+format, the processor stores the execution trace in a compressed form
+that is afterwards decoded by @value{GDBN}.
+
+The trace can be recorded with very low overhead. The compressed
+trace format also allows small trace buffers to hold a large number of
+instructions compared to @acronym{BTS}.
+
+Decoding the recorded execution trace, on the other hand, is more
+expensive than decoding @acronym{BTS} trace. This is mostly due to the
+increased number of instructions to process. You should increase the
+buffer-size with care.
@end table
Not all recording formats may be available on all processors.
the @kbd{run} or @kbd{start} commands, and then start the recording
with the @kbd{record @var{method}} command.
-Both @code{record @var{method}} and @code{rec @var{method}} are
-aliases of @code{target record-@var{method}}.
-
@cindex displaced stepping, and process record and replay
Displaced stepping (@pxref{Maintenance Commands,, displaced stepping})
will be automatically disabled when process record and replay target
Show the current setting of the requested ring buffer size for branch
tracing in @acronym{BTS} format.
+@kindex set record btrace pt
+@item set record btrace pt buffer-size @var{size}
+@itemx set record btrace pt buffer-size unlimited
+Set the requested ring buffer size for branch tracing in Intel(R)
+Processor Trace format. Default is 16KB.
+
+If @var{size} is a positive number, then @value{GDBN} will try to
+allocate a buffer of at least @var{size} bytes for each new thread
+that uses the btrace recording method and the Intel(R) Processor Trace
+format. The actually obtained buffer size may differ from the
+requested @var{size}. Use the @code{info record} command to see the
+actual buffer size for each thread.
+
+If @var{size} is @code{unlimited} or zero, @value{GDBN} will try to
+allocate a buffer of 4MB.
+
+Bigger buffers mean longer traces. On the other hand, @value{GDBN} will
+also take longer to process the branch trace data before it can be used.
+
+@item show record btrace pt buffer-size @var{size}
+Show the current setting of the requested ring buffer size for branch
+tracing in Intel(R) Processor Trace format.
+
@kindex info record
@item info record
Show various statistics about the recording depending on the recording
@item
Size of the perf ring buffer.
@end itemize
+
+For the @code{pt} recording format, it also shows:
+@itemize @bullet
+@item
+Size of the perf ring buffer.
+@end itemize
@end table
@kindex record delete
@tab @samp{-}
@tab Yes
+@item @samp{Qbtrace:pt}
+@tab Yes
+@tab @samp{-}
+@tab Yes
+
@item @samp{Qbtrace-conf:bts:size}
@tab Yes
@tab @samp{-}
@tab Yes
+@item @samp{Qbtrace-conf:pt:size}
+@tab Yes
+@tab @samp{-}
+@tab Yes
+
@item @samp{QNonStop}
@tab No
@tab @samp{-}
@item Qbtrace:bts
The remote stub understands the @samp{Qbtrace:bts} packet.
+@item Qbtrace:pt
+The remote stub understands the @samp{Qbtrace:pt} packet.
+
@item Qbtrace-conf:bts:size
The remote stub understands the @samp{Qbtrace-conf:bts:size} packet.
+@item Qbtrace-conf:pt:size
+The remote stub understands the @samp{Qbtrace-conf:pt:size} packet.
+
@item swbreak
The remote stub reports the @samp{swbreak} stop reason for memory
breakpoints.
@end table
@item Qbtrace:bts
-Enable branch tracing for the current thread using bts tracing.
+Enable branch tracing for the current thread using Branch Trace Store.
+
+Reply:
+@table @samp
+@item OK
+Branch tracing has been enabled.
+@item E.errtext
+A badly formed request or an error was encountered.
+@end table
+
+@item Qbtrace:pt
+Enable branch tracing for the current thread using Intel(R) Processor Trace.
Reply:
@table @samp
A badly formed request or an error was encountered.
@end table
+@item Qbtrace-conf:pt:size=@var{value}
+Set the requested ring buffer size for new threads that use the
+btrace recording method in pt format.
+
+Reply:
+@table @samp
+@item OK
+The ring buffer size has been set.
+@item E.errtext
+A badly formed request or an error was encountered.
+@end table
+
@end table
@node Architecture-Specific Protocol Details
The formal DTD for the branch trace format is given below:
@smallexample
-<!ELEMENT btrace (block)* >
+<!ELEMENT btrace (block* | pt) >
<!ATTLIST btrace version CDATA #FIXED "1.0">
<!ELEMENT block EMPTY>
<!ATTLIST block begin CDATA #REQUIRED
end CDATA #REQUIRED>
+
+<!ELEMENT pt (pt-config?, raw?)>
+
+<!ELEMENT pt-config (cpu?)>
+
+<!ELEMENT cpu EMPTY>
+<!ATTLIST cpu vendor CDATA #REQUIRED
+ family CDATA #REQUIRED
+ model CDATA #REQUIRED
+ stepping CDATA #REQUIRED>
+
+<!ELEMENT raw (#PCDATA)>
@end smallexample
@node Branch Trace Configuration Format
@item size
The size of the @acronym{BTS} ring buffer in bytes.
@end table
+@item pt
+This thread uses the @dfn{Intel(R) Processor Trace} (@acronym{Intel(R)
+PT}) format.
+@table @code
+@item size
+The size of the @acronym{Intel(R) PT} ring buffer in bytes.
+@end table
@end table
@value{GDBN} must be linked with the Expat library to support XML
The formal DTD for the branch trace configuration format is given below:
@smallexample
-<!ELEMENT btrace-conf (bts?)>
+<!ELEMENT btrace-conf (bts?, pt?)>
<!ATTLIST btrace-conf version CDATA #FIXED "1.0">
<!ELEMENT bts EMPTY>
<!ATTLIST bts size CDATA #IMPLIED>
+
+<!ELEMENT pt EMPTY>
+<!ATTLIST pt size CDATA #IMPLIED>
@end smallexample
@include agentexpr.texi
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. -->
-<!ELEMENT btrace-conf (bts?)>
+<!ELEMENT btrace-conf (bts?, pt?)>
<!ATTLIST btrace-conf version CDATA #FIXED "1.0">
<!ELEMENT bts EMPTY>
<!ATTLIST bts size CDATA #IMPLIED>
+
+<!ELEMENT pt EMPTY>
+<!ATTLIST pt size CDATA #IMPLIED>
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. -->
-<!ELEMENT btrace (block)* >
+<!ELEMENT btrace (block* | pt)>
<!ATTLIST btrace version CDATA #FIXED "1.0">
<!ELEMENT block EMPTY>
<!ATTLIST block begin CDATA #REQUIRED
end CDATA #REQUIRED>
+
+<!ELEMENT pt (pt-config?, raw?)>
+
+<!ELEMENT pt-config (cpu?)>
+
+<!ELEMENT cpu EMPTY>
+<!ATTLIST cpu vendor CDATA #REQUIRED
+ family CDATA #REQUIRED
+ model CDATA #REQUIRED
+ stepping CDATA #REQUIRED>
+
+<!ELEMENT raw (#PCDATA)>
+2015-07-02 Markus Metzger <markus.t.metzger@intel.com>
+
+ * linux-low.c: Include "rsp-low.h".
+ (linux_low_encode_pt_config, linux_low_encode_raw): New.
+ (linux_low_read_btrace): Support BTRACE_FORMAT_PT.
+ (linux_low_btrace_conf): Support BTRACE_FORMAT_PT.
+ (handle_btrace_enable_pt): New.
+ (handle_btrace_general_set): Support "pt".
+ (handle_btrace_conf_general_set): Support "pt:size".
+
2015-06-29 Pierre Langlois <pierre.langlois@arm.com>
* linux-aarch64-low.c (aarch64_supports_z_point_type): Enable for
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
+#include "rsp-low.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
return (err == BTRACE_ERR_NONE ? 0 : -1);
}
+/* Encode an Intel(R) Processor Trace configuration. */
+
+static void
+linux_low_encode_pt_config (struct buffer *buffer,
+ const struct btrace_data_pt_config *config)
+{
+ buffer_grow_str (buffer, "<pt-config>\n");
+
+ switch (config->cpu.vendor)
+ {
+ case CV_INTEL:
+ buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
+ "model=\"%u\" stepping=\"%u\"/>\n",
+ config->cpu.family, config->cpu.model,
+ config->cpu.stepping);
+ break;
+
+ default:
+ break;
+ }
+
+ buffer_grow_str (buffer, "</pt-config>\n");
+}
+
+/* Encode a raw buffer. */
+
+static void
+linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
+ unsigned int size)
+{
+ if (size == 0)
+ return;
+
+ /* We use hex encoding - see common/rsp-low.h. */
+ buffer_grow_str (buffer, "<raw>\n");
+
+ while (size-- > 0)
+ {
+ char elem[2];
+
+ elem[0] = tohex ((*data >> 4) & 0xf);
+ elem[1] = tohex (*data++ & 0xf);
+
+ buffer_grow (buffer, elem, 2);
+ }
+
+ buffer_grow_str (buffer, "</raw>\n");
+}
+
/* See to_read_btrace target method. */
static int
else
buffer_grow_str0 (buffer, "E.Generic Error.");
- btrace_data_fini (&btrace);
- return -1;
+ goto err;
}
switch (btrace.format)
{
case BTRACE_FORMAT_NONE:
buffer_grow_str0 (buffer, "E.No Trace.");
- break;
+ goto err;
case BTRACE_FORMAT_BTS:
buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
buffer_grow_str0 (buffer, "</btrace>\n");
break;
- default:
- buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
+ case BTRACE_FORMAT_PT:
+ buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
+ buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
+ buffer_grow_str (buffer, "<pt>\n");
+
+ linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
- btrace_data_fini (&btrace);
- return -1;
+ linux_low_encode_raw (buffer, btrace.variant.pt.data,
+ btrace.variant.pt.size);
+
+ buffer_grow_str (buffer, "</pt>\n");
+ buffer_grow_str0 (buffer, "</btrace>\n");
+ break;
+
+ default:
+ buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
+ goto err;
}
btrace_data_fini (&btrace);
return 0;
+
+err:
+ btrace_data_fini (&btrace);
+ return -1;
}
/* See to_btrace_conf target method. */
buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
buffer_xml_printf (buffer, " />\n");
break;
+
+ case BTRACE_FORMAT_PT:
+ buffer_xml_printf (buffer, "<pt");
+ buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
+ buffer_xml_printf (buffer, "/>\n");
+ break;
}
}
return NULL;
}
+/* Handle btrace enabling in Intel(R) Processor Trace format. */
+
+static const char *
+handle_btrace_enable_pt (struct thread_info *thread)
+{
+ if (thread->btrace != NULL)
+ return "E.Btrace already enabled.";
+
+ current_btrace_conf.format = BTRACE_FORMAT_PT;
+ thread->btrace = target_enable_btrace (thread->entry.id,
+ &current_btrace_conf);
+ if (thread->btrace == NULL)
+ return "E.Could not enable btrace.";
+
+ return NULL;
+}
+
/* Handle btrace disabling. */
static const char *
if (strcmp (op, "bts") == 0)
err = handle_btrace_enable_bts (thread);
+ else if (strcmp (op, "pt") == 0)
+ err = handle_btrace_enable_pt (thread);
else if (strcmp (op, "off") == 0)
err = handle_btrace_disable (thread);
else
- err = "E.Bad Qbtrace operation. Use bts or off.";
+ err = "E.Bad Qbtrace operation. Use bts, pt, or off.";
if (err != 0)
strcpy (own_buf, err);
current_btrace_conf.bts.size = (unsigned int) size;
}
+ else if (strncmp (op, "pt:size=", strlen ("pt:size=")) == 0)
+ {
+ unsigned long size;
+ char *endp = NULL;
+
+ errno = 0;
+ size = strtoul (op + strlen ("pt:size="), &endp, 16);
+ if (endp == NULL || *endp != 0 || errno != 0 || size > UINT_MAX)
+ {
+ strcpy (own_buf, "E.Bad size value.");
+ return -1;
+ }
+
+ current_btrace_conf.pt.size = (unsigned int) size;
+ }
else
{
strcpy (own_buf, "E.Bad Qbtrace configuration option.");
static void
supported_btrace_packets (char *buf)
{
+ int btrace_supported = 0;
+
if (target_supports_btrace (BTRACE_FORMAT_BTS))
{
strcat (buf, ";Qbtrace:bts+");
strcat (buf, ";Qbtrace-conf:bts:size+");
+
+ btrace_supported = 1;
}
- else
+
+ if (target_supports_btrace (BTRACE_FORMAT_PT))
+ {
+ strcat (buf, ";Qbtrace:pt+");
+ strcat (buf, ";Qbtrace-conf:pt:size+");
+
+ btrace_supported = 1;
+ }
+
+ if (!btrace_supported)
return;
strcat (buf, ";Qbtrace:off+");
This is the same as the size of a pointer for the inferior process
except when a 32-bit inferior is running on a 64-bit OS. */
+/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
+ to the memory holding the copy.
+ The caller is responsible for freeing the memory. */
+
+static gdb_byte *
+perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
+ unsigned long size)
+{
+ const gdb_byte *begin, *end, *start, *stop;
+ gdb_byte *buffer;
+ unsigned long data_tail, buffer_size;
+
+ if (size == 0)
+ return NULL;
+
+ gdb_assert (size <= data_head);
+ data_tail = data_head - size;
+
+ buffer_size = pev->size;
+ begin = pev->mem;
+ start = begin + data_tail % buffer_size;
+ stop = begin + data_head % buffer_size;
+
+ buffer = xmalloc (size);
+
+ if (start < stop)
+ memcpy (buffer, start, stop - start);
+ else
+ {
+ end = begin + buffer_size;
+
+ memcpy (buffer, start, end - start);
+ memcpy (buffer + (end - start), begin, stop - begin);
+ }
+
+ return buffer;
+}
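
The modular arithmetic above can be seen in isolation in the following
standalone sketch, which performs the same wrap-around copy on a plain byte
array; ring_copy_sketch is an illustrative helper, whereas the real code
operates on the mmapped perf buffer.

#include <stdlib.h>
#include <string.h>

/* Copy the last SIZE bytes ending at DATA_HEAD out of a circular buffer of
   BUFFER_SIZE bytes, mirroring the logic of perf_event_read.  With
   buffer_size == 8, data_head == 10 and size == 6, this copies offsets 4..7
   followed by offsets 0..1.  */

static unsigned char *
ring_copy_sketch (const unsigned char *ring, unsigned long buffer_size,
                  unsigned long data_head, unsigned long size)
{
  unsigned long data_tail = data_head - size;
  const unsigned char *start = ring + data_tail % buffer_size;
  const unsigned char *stop = ring + data_head % buffer_size;
  unsigned char *copy;

  if (size == 0)
    return NULL;

  copy = malloc (size);
  if (copy == NULL)
    return NULL;

  if (start < stop)
    memcpy (copy, start, stop - start);
  else
    {
      /* The range wraps around the end of the ring: copy the trailing part
         of the buffer first, then the part at its beginning.  */
      const unsigned char *end = ring + buffer_size;

      memcpy (copy, start, end - start);
      memcpy (copy + (end - start), ring, stop - ring);
    }

  return copy;
}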
+
+/* Copy the perf event buffer data from PEV.
+ Store a pointer to the copy into DATA and its size in PSIZE. */
+
+static void
+perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
+ unsigned long *psize)
+{
+ unsigned long data_head, size;
+
+ data_head = *pev->data_head;
+
+ size = pev->size;
+ if (data_head < size)
+ size = data_head;
+
+ *data = perf_event_read (pev, data_head, size);
+ *psize = size;
+
+ pev->last_head = data_head;
+}
+
+/* Determine the event type.
+ Returns zero on success and fills in TYPE; returns -1 otherwise. */
+
+static int
+perf_event_pt_event_type (int *type)
+{
+ FILE *file;
+ int found;
+
+ file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
+ if (file == NULL)
+ return -1;
+
+ found = fscanf (file, "%d", type);
+
+ fclose (file);
+
+ if (found == 1)
+ return 0;
+ return -1;
+}
+
static int
linux_determine_kernel_ptr_bits (void)
{
}
}
+/* Check whether the kernel supports Intel(R) Processor Trace. */
+
+static int
+kernel_supports_pt (void)
+{
+ struct perf_event_attr attr;
+ pid_t child, pid;
+ int status, file, type;
+
+ errno = 0;
+ child = fork ();
+ switch (child)
+ {
+ case -1:
+ warning (_("test pt: cannot fork: %s."), strerror (errno));
+ return 0;
+
+ case 0:
+ status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
+ if (status != 0)
+ {
+ warning (_("test pt: cannot PTRACE_TRACEME: %s."),
+ strerror (errno));
+ _exit (1);
+ }
+
+ status = raise (SIGTRAP);
+ if (status != 0)
+ {
+ warning (_("test pt: cannot raise SIGTRAP: %s."),
+ strerror (errno));
+ _exit (1);
+ }
+
+ _exit (1);
+
+ default:
+ pid = waitpid (child, &status, 0);
+ if (pid != child)
+ {
+ warning (_("test pt: bad pid %ld, error: %s."),
+ (long) pid, strerror (errno));
+ return 0;
+ }
+
+ if (!WIFSTOPPED (status))
+ {
+ warning (_("test pt: expected stop. status: %d."),
+ status);
+ return 0;
+ }
+
+ status = perf_event_pt_event_type (&type);
+ if (status != 0)
+ file = -1;
+ else
+ {
+ memset (&attr, 0, sizeof (attr));
+
+ attr.size = sizeof (attr);
+ attr.type = type;
+ attr.exclude_kernel = 1;
+ attr.exclude_hv = 1;
+ attr.exclude_idle = 1;
+
+ file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
+ if (file >= 0)
+ close (file);
+ }
+
+ kill (child, SIGKILL);
+ ptrace (PTRACE_KILL, child, NULL, NULL);
+
+ pid = waitpid (child, &status, 0);
+ if (pid != child)
+ {
+ warning (_("test pt: bad pid %ld, error: %s."),
+ (long) pid, strerror (errno));
+ if (!WIFSIGNALED (status))
+ warning (_("test pt: expected killed. status: %d."),
+ status);
+ }
+
+ return (file >= 0);
+ }
+}
+
/* Check whether an Intel cpu supports BTS. */
static int
return cached > 0;
}
+/* Check whether the linux target supports Intel(R) Processor Trace. */
+
+static int
+linux_supports_pt (void)
+{
+ static int cached;
+
+ if (cached == 0)
+ {
+ if (!kernel_supports_pt ())
+ cached = -1;
+ else
+ cached = 1;
+ }
+
+ return cached > 0;
+}
+
/* See linux-btrace.h. */
int
case BTRACE_FORMAT_BTS:
return linux_supports_bts ();
+
+ case BTRACE_FORMAT_PT:
+ return linux_supports_pt ();
}
internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
errno = 0;
bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
if (bts->file < 0)
- goto err;
+ goto err_out;
/* Convert the requested size in bytes to pages (rounding up). */
pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
/* We were not able to allocate any buffer. */
close (bts->file);
+ err_out:
+ xfree (tinfo);
+ return NULL;
+}
+
+#if defined (PERF_ATTR_SIZE_VER5)
+
+/* Enable branch tracing in Intel(R) Processor Trace format. */
+
+static struct btrace_target_info *
+linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
+{
+ struct perf_event_mmap_page *header;
+ struct btrace_target_info *tinfo;
+ struct btrace_tinfo_pt *pt;
+ unsigned long long pages, size;
+ int pid, pg, errcode, type;
+
+ if (conf->size == 0)
+ return NULL;
+
+ errcode = perf_event_pt_event_type (&type);
+ if (errcode != 0)
+ return NULL;
+
+ pid = ptid_get_lwp (ptid);
+ if (pid == 0)
+ pid = ptid_get_pid (ptid);
+
+ tinfo = xzalloc (sizeof (*tinfo));
+ tinfo->ptid = ptid;
+ tinfo->ptr_bits = 0;
+
+ tinfo->conf.format = BTRACE_FORMAT_PT;
+ pt = &tinfo->variant.pt;
+
+ pt->attr.size = sizeof (pt->attr);
+ pt->attr.type = type;
+
+ pt->attr.exclude_kernel = 1;
+ pt->attr.exclude_hv = 1;
+ pt->attr.exclude_idle = 1;
+
+ errno = 0;
+ pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
+ if (pt->file < 0)
+ goto err;
+
+ /* Allocate the configuration page. */
+ header = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pt->file, 0);
+ if (header == MAP_FAILED)
+ goto err_file;
+
+ header->aux_offset = header->data_offset + header->data_size;
+
+ /* Convert the requested size in bytes to pages (rounding up). */
+ pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* We need at least one page. */
+ if (pages == 0)
+ pages = 1;
+
+ /* The buffer size can be requested in powers of two pages. Adjust PAGES
+ to the next power of two. */
+ for (pg = 0; pages != (1u << pg); ++pg)
+ if ((pages & (1u << pg)) != 0)
+ pages += (1u << pg);
+
+ /* We try to allocate the requested size.
+ If that fails, try to get as much as we can. */
+ for (; pages > 0; pages >>= 1)
+ {
+ size_t length;
+
+ size = pages * PAGE_SIZE;
+ length = size;
+
+ /* Check for overflows. */
+ if ((unsigned long long) length < size)
+ continue;
+
+ header->aux_size = size;
+
+ pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
+ header->aux_offset);
+ if (pt->pt.mem != MAP_FAILED)
+ break;
+ }
+
+ if (pages == 0)
+ goto err_conf;
+
+ pt->header = header;
+ pt->pt.size = size;
+ pt->pt.data_head = &header->aux_head;
+
+ tinfo->conf.pt.size = size;
+ return tinfo;
+
+ err_conf:
+ munmap ((void *) header, PAGE_SIZE);
+
+ err_file:
+ close (pt->file);
+
err:
xfree (tinfo);
return NULL;
}
+#else /* !defined (PERF_ATTR_SIZE_VER5) */
+
+static struct btrace_target_info *
+linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
+{
+ errno = EOPNOTSUPP;
+ return NULL;
+}
+
+#endif /* !defined (PERF_ATTR_SIZE_VER5) */
+
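The buffer sizing in linux_enable_pt above (round the request up to whole
pages, then up to a power of two before mmapping the aux area) can be
illustrated with a standalone sketch; pt_buffer_pages_sketch and its
parameters are illustrative names that do not exist in GDB.

/* Mirror of the PAGES computation in linux_enable_pt.  A 16 KB request with
   4 KB pages yields 4 pages (already a power of two); a 20 KB request yields
   5 pages, which is rounded up to 8.  */

static unsigned long long
pt_buffer_pages_sketch (unsigned long long requested_size,
                        unsigned long long page_size)
{
  unsigned long long pages;
  int pg;

  /* Round the requested size up to whole pages; use at least one page.  */
  pages = (requested_size + page_size - 1) / page_size;
  if (pages == 0)
    pages = 1;

  /* Round PAGES up to the next power of two.  */
  for (pg = 0; pages != (1u << pg); ++pg)
    if ((pages & (1u << pg)) != 0)
      pages += (1u << pg);

  return pages;
}
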
/* See linux-btrace.h. */
struct btrace_target_info *
case BTRACE_FORMAT_BTS:
tinfo = linux_enable_bts (ptid, &conf->bts);
break;
+
+ case BTRACE_FORMAT_PT:
+ tinfo = linux_enable_pt (ptid, &conf->pt);
+ break;
}
return tinfo;
return BTRACE_ERR_NONE;
}
+/* Disable Intel(R) Processor Trace tracing. */
+
+static enum btrace_error
+linux_disable_pt (struct btrace_tinfo_pt *tinfo)
+{
+ munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
+ munmap ((void *) tinfo->header, PAGE_SIZE);
+ close (tinfo->file);
+
+ return BTRACE_ERR_NONE;
+}
+
/* See linux-btrace.h. */
enum btrace_error
case BTRACE_FORMAT_BTS:
errcode = linux_disable_bts (&tinfo->variant.bts);
break;
+
+ case BTRACE_FORMAT_PT:
+ errcode = linux_disable_pt (&tinfo->variant.pt);
+ break;
}
if (errcode == BTRACE_ERR_NONE)
return BTRACE_ERR_NONE;
}
+/* Fill in the Intel(R) Processor Trace configuration information. */
+
+static void
+linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
+{
+ conf->cpu = btrace_this_cpu ();
+}
+
+/* Read branch trace data in Intel(R) Processor Trace format for the thread
+ given by TINFO into BTRACE using the TYPE reading method. */
+
+static enum btrace_error
+linux_read_pt (struct btrace_data_pt *btrace,
+ struct btrace_target_info *tinfo,
+ enum btrace_read_type type)
+{
+ struct perf_event_buffer *pt;
+
+ pt = &tinfo->variant.pt.pt;
+
+ linux_fill_btrace_pt_config (&btrace->config);
+
+ switch (type)
+ {
+ case BTRACE_READ_DELTA:
+ /* We don't support delta reads. The data head (i.e. aux_head) wraps
+ around to stay inside the aux buffer. */
+ return BTRACE_ERR_NOT_SUPPORTED;
+
+ case BTRACE_READ_NEW:
+ if (!perf_event_new_data (pt))
+ return BTRACE_ERR_NONE;
+
+ /* Fall through. */
+ case BTRACE_READ_ALL:
+ perf_event_read_all (pt, &btrace->data, &btrace->size);
+ return BTRACE_ERR_NONE;
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
+}
+
/* See linux-btrace.h. */
enum btrace_error
btrace->variant.bts.blocks = NULL;
return linux_read_bts (&btrace->variant.bts, tinfo, type);
+
+ case BTRACE_FORMAT_PT:
+ /* We read btrace in Intel(R) Processor Trace format. */
+ btrace->format = BTRACE_FORMAT_PT;
+ btrace->variant.pt.data = NULL;
+ btrace->variant.pt.size = 0;
+
+ return linux_read_pt (&btrace->variant.pt, tinfo, type);
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The BTS perf event buffer. */
struct perf_event_buffer bts;
};
+
+/* Branch trace target information for Intel(R) Processor Trace. */
+struct btrace_tinfo_pt
+{
+ /* The Linux perf_event configuration for collecting the branch trace. */
+ struct perf_event_attr attr;
+
+ /* The perf event file. */
+ int file;
+
+ /* The perf event configuration page. */
+ volatile struct perf_event_mmap_page *header;
+
+ /* The trace perf event buffer. */
+ struct perf_event_buffer pt;
+};
#endif /* HAVE_LINUX_PERF_EVENT_H */
/* Branch trace target information per thread. */
{
/* CONF.FORMAT == BTRACE_FORMAT_BTS. */
struct btrace_tinfo_bts bts;
+
+ /* CONF.FORMAT == BTRACE_FORMAT_PT. */
+ struct btrace_tinfo_pt pt;
} variant;
#endif /* HAVE_LINUX_PERF_EVENT_H */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;
+/* Command lists for "set/show record btrace pt". */
+static struct cmd_list_element *set_record_btrace_pt_cmdlist;
+static struct cmd_list_element *show_record_btrace_pt_cmdlist;
+
/* Print a record-btrace debug message. Use do ... while (0) to avoid
ambiguities when used in if statements. */
}
}
+/* Print an Intel(R) Processor Trace configuration. */
+
+static void
+record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
+{
+ const char *suffix;
+ unsigned int size;
+
+ size = conf->size;
+ if (size > 0)
+ {
+ suffix = record_btrace_adjust_size (&size);
+ printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
+ }
+}
+
/* Print a branch tracing configuration. */
static void
case BTRACE_FORMAT_BTS:
record_btrace_print_bts_conf (&conf->bts);
return;
+
+ case BTRACE_FORMAT_PT:
+ record_btrace_print_pt_conf (&conf->pt);
+ return;
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
break;
}
break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ switch (errcode)
+ {
+ case BDE_PT_USER_QUIT:
+ is_error = 0;
+ errstr = _("trace decode cancelled");
+ break;
+
+ case BDE_PT_DISABLED:
+ is_error = 0;
+ errstr = _("disabled");
+ break;
+
+ case BDE_PT_OVERFLOW:
+ is_error = 0;
+ errstr = _("overflow");
+ break;
+
+ default:
+ if (errcode < 0)
+ errstr = pt_errstr (pt_errcode (errcode));
+ break;
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
}
ui_out_text (uiout, _("["));
static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
-
if (args != NULL && *args != 0)
error (_("Invalid argument."));
END_CATCH
}
-/* Alias for "target record". */
+/* Start recording Intel(R) Processor Trace. */
static void
-cmd_record_btrace_start (char *args, int from_tty)
+cmd_record_btrace_pt_start (char *args, int from_tty)
{
-
if (args != NULL && *args != 0)
error (_("Invalid argument."));
- record_btrace_conf.format = BTRACE_FORMAT_BTS;
+ record_btrace_conf.format = BTRACE_FORMAT_PT;
TRY
{
END_CATCH
}
+/* Alias for "target record". */
+
+static void
+cmd_record_btrace_start (char *args, int from_tty)
+{
+ if (args != NULL && *args != 0)
+ error (_("Invalid argument."));
+
+ record_btrace_conf.format = BTRACE_FORMAT_PT;
+
+ TRY
+ {
+ execute_command ("target record-btrace", from_tty);
+ }
+ CATCH (exception, RETURN_MASK_ALL)
+ {
+ record_btrace_conf.format = BTRACE_FORMAT_BTS;
+
+ TRY
+ {
+ execute_command ("target record-btrace", from_tty);
+ }
+ CATCH (exception, RETURN_MASK_ALL)
+ {
+ record_btrace_conf.format = BTRACE_FORMAT_NONE;
+ throw_exception (exception);
+ }
+ END_CATCH
+ }
+ END_CATCH
+}
+
/* The "set record btrace" command. */
static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
printf_unfiltered (_("\"set record btrace bts\" must be followed "
- "by an apporpriate subcommand.\n"));
+ "by an appropriate subcommand.\n"));
help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
all_commands, gdb_stdout);
}
cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
+/* The "set record btrace pt" command. */
+
+static void
+cmd_set_record_btrace_pt (char *args, int from_tty)
+{
+ printf_unfiltered (_("\"set record btrace pt\" must be followed "
+ "by an appropriate subcommand.\n"));
+ help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
+ all_commands, gdb_stdout);
+}
+
+/* The "show record btrace pt" command. */
+
+static void
+cmd_show_record_btrace_pt (char *args, int from_tty)
+{
+ cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
+}
+
+/* The "record bts buffer-size" show value function. */
+
+static void
+show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c,
+ const char *value)
+{
+ fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
+ value);
+}
+
+/* The "record pt buffer-size" show value function. */
+
+static void
+show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c,
+ const char *value)
+{
+ fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
+ value);
+}
+
void _initialize_record_btrace (void);
/* Initialize btrace commands. */
&record_btrace_cmdlist);
add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
+ add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
+ _("\
+Start branch trace recording in Intel(R) Processor Trace format.\n\n\
+This format may not be available on all processors."),
+ &record_btrace_cmdlist);
+ add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
+
add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
_("Set record options"), &set_record_btrace_cmdlist,
"set record btrace ", 0, &set_record_cmdlist);
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
-The trace buffer size may not be changed while recording."), NULL, NULL,
+The trace buffer size may not be changed while recording."), NULL,
+ show_record_bts_buffer_size_value,
&set_record_btrace_bts_cmdlist,
&show_record_btrace_bts_cmdlist);
+ add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
+ _("Set record btrace pt options"),
+ &set_record_btrace_pt_cmdlist,
+ "set record btrace pt ", 0, &set_record_btrace_cmdlist);
+
+ add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
+ _("Show record btrace pt options"),
+ &show_record_btrace_pt_cmdlist,
+ "show record btrace pt ", 0, &show_record_btrace_cmdlist);
+
+ add_setshow_uinteger_cmd ("buffer-size", no_class,
+ &record_btrace_conf.pt.size,
+ _("Set the record/replay pt buffer size."),
+ _("Show the record/replay pt buffer size."), _("\
+Bigger buffers allow longer recording but also take more time to process \
+the recorded execution.\n\
+The actual buffer size may differ from the requested size. Use \"info record\" \
+to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
+ &set_record_btrace_pt_cmdlist,
+ &show_record_btrace_pt_cmdlist);
+
init_record_btrace_ops ();
add_target (&record_btrace_ops);
xcalloc, xfree);
record_btrace_conf.bts.size = 64 * 1024;
+ record_btrace_conf.pt.size = 16 * 1024;
}
PACKET_QTBuffer_size,
PACKET_Qbtrace_off,
PACKET_Qbtrace_bts,
+ PACKET_Qbtrace_pt,
PACKET_qXfer_btrace,
/* Support for the QNonStop packet. */
/* Support for vfork events. */
PACKET_vfork_event_feature,
+ /* Support for the Qbtrace-conf:pt:size packet. */
+ PACKET_Qbtrace_conf_pt_size,
+
PACKET_MAX
};
{ "tracenz", PACKET_DISABLE, remote_supported_packet, PACKET_tracenz_feature },
{ "Qbtrace:off", PACKET_DISABLE, remote_supported_packet, PACKET_Qbtrace_off },
{ "Qbtrace:bts", PACKET_DISABLE, remote_supported_packet, PACKET_Qbtrace_bts },
+ { "Qbtrace:pt", PACKET_DISABLE, remote_supported_packet, PACKET_Qbtrace_pt },
{ "qXfer:btrace:read", PACKET_DISABLE, remote_supported_packet,
PACKET_qXfer_btrace },
{ "qXfer:btrace-conf:read", PACKET_DISABLE, remote_supported_packet,
PACKET_fork_event_feature },
{ "vfork-events", PACKET_DISABLE, remote_supported_packet,
PACKET_vfork_event_feature },
+ { "Qbtrace-conf:pt:size", PACKET_DISABLE, remote_supported_packet,
+ PACKET_Qbtrace_conf_pt_size }
};
static char *remote_support_xml;
case BTRACE_FORMAT_BTS:
return (packet_support (PACKET_Qbtrace_bts) == PACKET_ENABLE);
+
+ case BTRACE_FORMAT_PT:
+ /* The trace is decoded on the host. Even if our target supports it,
+ we still need to have libipt to decode the trace. */
+#if defined (HAVE_LIBIPT)
+ return (packet_support (PACKET_Qbtrace_pt) == PACKET_ENABLE);
+#else /* !defined (HAVE_LIBIPT) */
+ return 0;
+#endif /* !defined (HAVE_LIBIPT) */
}
internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
rs->btrace_config.bts.size = conf->bts.size;
}
+
+ packet = &remote_protocol_packets[PACKET_Qbtrace_conf_pt_size];
+ if (packet_config_support (packet) == PACKET_ENABLE
+ && conf->pt.size != rs->btrace_config.pt.size)
+ {
+ pos = buf;
+ pos += xsnprintf (pos, endbuf - pos, "%s=0x%x", packet->name,
+ conf->pt.size);
+
+ putpkt (buf);
+ getpkt (&buf, &rs->buf_size, 0);
+
+ if (packet_ok (buf, packet) == PACKET_ERROR)
+ {
+ if (buf[0] == 'E' && buf[1] == '.')
+ error (_("Failed to configure the trace buffer size: %s"), buf + 2);
+ else
+ error (_("Failed to configure the trace buffer size."));
+ }
+
+ rs->btrace_config.pt.size = conf->pt.size;
+ }
}
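
For reference, the configuration packet built above for the default 16 KB
Intel(R) Processor Trace buffer looks as follows; this sketch uses the
standard snprintf in place of GDB's xsnprintf and is illustrative only.

#include <stdio.h>

int
main (void)
{
  char buf[64];
  unsigned int pt_size = 16 * 1024;  /* Default "record btrace pt" buffer.  */

  /* 16 * 1024 == 0x4000, so the stub receives "Qbtrace-conf:pt:size=0x4000"
     before the subsequent "Qbtrace:pt" enable packet.  */
  snprintf (buf, sizeof (buf), "%s=0x%x", "Qbtrace-conf:pt:size", pt_size);
  printf ("%s\n", buf);
  return 0;
}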
/* Read the current thread's btrace configuration from the target and
char *xml;
xml = target_read_stralloc (&current_target,
- TARGET_OBJECT_BTRACE_CONF, "");
+ TARGET_OBJECT_BTRACE_CONF, "");
if (xml != NULL)
{
struct cleanup *cleanup;
const struct btrace_config *conf)
{
struct btrace_target_info *tinfo = NULL;
- struct packet_config *packet = &remote_protocol_packets[PACKET_Qbtrace_bts];
+ struct packet_config *packet = NULL;
struct remote_state *rs = get_remote_state ();
char *buf = rs->buf;
char *endbuf = rs->buf + get_remote_packet_size ();
- if (packet_config_support (packet) != PACKET_ENABLE)
+ switch (conf->format)
+ {
+ case BTRACE_FORMAT_BTS:
+ packet = &remote_protocol_packets[PACKET_Qbtrace_bts];
+ break;
+
+ case BTRACE_FORMAT_PT:
+ packet = &remote_protocol_packets[PACKET_Qbtrace_pt];
+ break;
+ }
+
+ if (packet == NULL || packet_config_support (packet) != PACKET_ENABLE)
error (_("Target does not support branch tracing."));
btrace_sync_conf (conf);
}
xml = target_read_stralloc (&current_target,
- TARGET_OBJECT_BTRACE, annex);
+ TARGET_OBJECT_BTRACE, annex);
if (xml == NULL)
return BTRACE_ERR_UNKNOWN;
"Qbtrace:off", "disable-btrace", 0);
add_packet_config_cmd (&remote_protocol_packets[PACKET_Qbtrace_bts],
- "Qbtrace:bts", "enable-btrace", 0);
+ "Qbtrace:bts", "enable-btrace-bts", 0);
+
+ add_packet_config_cmd (&remote_protocol_packets[PACKET_Qbtrace_pt],
+ "Qbtrace:pt", "enable-btrace-pt", 0);
add_packet_config_cmd (&remote_protocol_packets[PACKET_qXfer_btrace],
"qXfer:btrace", "read-btrace", 0);
add_packet_config_cmd (&remote_protocol_packets[PACKET_vfork_event_feature],
"vfork-event-feature", "vfork-event-feature", 0);
+ add_packet_config_cmd (&remote_protocol_packets[PACKET_Qbtrace_conf_pt_size],
+ "Qbtrace-conf:pt:size", "btrace-conf-pt-size", 0);
+
/* Assert that we've registered "set remote foo-packet" commands
for all packet configs. */
{