#include <unistd.h>
#include <errno.h>
+#ifndef HAVE_DIRENT_D_TYPE
+#include <limits.h> // PATH_MAX
+#endif
+
#include <drm-uapi/i915_drm.h>
#include "common/gen_gem.h"
-#include "gen_perf.h"
-#include "perf/gen_perf_mdapi.h"
-#include "perf/gen_perf_metrics.h"
#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
+
+#include "perf/gen_perf.h"
+#include "perf/gen_perf_regs.h"
+#include "perf/gen_perf_mdapi.h"
+#include "perf/gen_perf_metrics.h"
+#include "perf/gen_perf_private.h"
+
#include "util/bitscan.h"
+#include "util/macros.h"
+#include "util/mesa-sha1.h"
#include "util/u_math.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON
-#define MI_RPC_BO_SIZE 4096
-#define MI_FREQ_START_OFFSET_BYTES (3072)
-#define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
-#define MI_FREQ_END_OFFSET_BYTES (3076)
-#define MAP_READ (1 << 0)
-#define MAP_WRITE (1 << 1)
+#define OA_REPORT_INVALID_CTX_ID (0xffffffff)
+
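+/* Test whether a dirent names a directory or a symlink, falling back to
+ * lstat() on platforms where dirent has no d_type field.
+ */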
+static bool
+is_dir_or_link(const struct dirent *entry, const char *parent_dir)
+{
+#ifdef HAVE_DIRENT_D_TYPE
+ return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
+#else
+ struct stat st;
+ char path[PATH_MAX + 1];
+ snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
+   if (lstat(path, &st) != 0)
+      return false;
+   return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
+#endif
+}
static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
perf->sysfs_dev_dir[0] = '\0';
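+   /* With DEBUG_NO_OACONFIG there is nothing to look up in sysfs; report
+    * success without touching it.
+    */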
+ if (unlikely(INTEL_DEBUG & DEBUG_NO_OACONFIG))
+ return true;
+
if (fstat(fd, &sb)) {
DBG("Failed to stat DRM fd\n");
return false;
}
while ((drm_entry = readdir(drmdir))) {
- if ((drm_entry->d_type == DT_DIR ||
- drm_entry->d_type == DT_LNK) &&
+ if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
strncmp(drm_entry->d_name, "card", 4) == 0)
{
len = snprintf(perf->sysfs_dev_dir,
static void
register_oa_config(struct gen_perf_config *perf,
+ const struct gen_device_info *devinfo,
const struct gen_perf_query_info *query,
uint64_t config_id)
{
- struct gen_perf_query_info *registred_query =
- gen_perf_query_append_query_info(perf, 0);
-
- *registred_query = *query;
- registred_query->oa_metrics_set_id = config_id;
- DBG("metric set registred: id = %" PRIu64", guid = %s\n",
- registred_query->oa_metrics_set_id, query->guid);
+ struct gen_perf_query_info *registered_query =
+ gen_perf_append_query_info(perf, 0);
+
+ *registered_query = *query;
+ registered_query->oa_format = devinfo->gen >= 8 ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_A45_B8_C8;
+ registered_query->oa_metrics_set_id = config_id;
+ DBG("metric set registered: id = %" PRIu64", guid = %s\n",
+ registered_query->oa_metrics_set_id, query->guid);
}
static void
-enumerate_sysfs_metrics(struct gen_perf_config *perf)
+enumerate_sysfs_metrics(struct gen_perf_config *perf,
+ const struct gen_device_info *devinfo)
{
DIR *metricsdir = NULL;
struct dirent *metric_entry;
while ((metric_entry = readdir(metricsdir))) {
struct hash_entry *entry;
-
- if ((metric_entry->d_type != DT_DIR &&
- metric_entry->d_type != DT_LNK) ||
+ if (!is_dir_or_link(metric_entry, buf) ||
metric_entry->d_name[0] == '.')
continue;
metric_entry->d_name);
if (entry) {
uint64_t id;
-
- len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
- perf->sysfs_dev_dir, metric_entry->d_name);
- if (len < 0 || len >= sizeof(buf)) {
- DBG("Failed to concatenate path to sysfs metric id file\n");
- continue;
- }
-
- if (!read_file_uint64(buf, &id)) {
+ if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
DBG("Failed to read metric set id from %s: %m", buf);
continue;
}
- register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
+ register_oa_config(perf, devinfo,
+ (const struct gen_perf_query_info *)entry->data, id);
} else
DBG("metric set not known by mesa (skipping)\n");
}
closedir(metricsdir);
}
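+
+/* With DEBUG_NO_OACONFIG, register every metric set mesa knows about with a
+ * dummy kernel config id of 0.
+ */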
+static void
+add_all_metrics(struct gen_perf_config *perf,
+ const struct gen_device_info *devinfo)
+{
+ hash_table_foreach(perf->oa_metrics_table, entry) {
+ const struct gen_perf_query_info *query = entry->data;
+ register_oa_config(perf, devinfo, query, 0);
+ }
+}
+
static bool
kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
&invalid_config_id) < 0 && errno == ENOENT;
}
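+
+/* Submit an array of query items to the i915 through a single
+ * DRM_IOCTL_I915_QUERY ioctl.
+ */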
+static int
+i915_query_items(struct gen_perf_config *perf, int fd,
+ struct drm_i915_query_item *items, uint32_t n_items)
+{
+ struct drm_i915_query q = {
+ .num_items = n_items,
+ .items_ptr = to_user_pointer(items),
+ };
+ return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
+}
+
+static bool
+i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
+{
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_PERF_CONFIG,
+ .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
+ };
+
+ return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
+}
+
+static bool
+i915_query_perf_config_data(struct gen_perf_config *perf,
+ int fd, const char *guid,
+ struct drm_i915_perf_oa_config *config)
+{
+ struct {
+ struct drm_i915_query_perf_config query;
+ struct drm_i915_perf_oa_config config;
+ } item_data;
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_PERF_CONFIG,
+ .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+ .data_ptr = to_user_pointer(&item_data),
+ .length = sizeof(item_data),
+ };
+
+ memset(&item_data, 0, sizeof(item_data));
+ memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
+ memcpy(&item_data.config, config, sizeof(item_data.config));
+
+   if (i915_query_items(perf, fd, &item, 1) != 0 || item.length <= 0)
+ return false;
+
+ memcpy(config, &item_data.config, sizeof(item_data.config));
+
+ return true;
+}
+
bool
-gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
+gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
+ const char *guid,
uint64_t *metric_id)
{
char config_path[280];
snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
- perf->sysfs_dev_dir, guid);
+ perf_cfg->sysfs_dev_dir, guid);
/* Don't recreate already loaded configs. */
return read_file_uint64(config_path, metric_id);
}
+static uint64_t
+i915_add_config(struct gen_perf_config *perf, int fd,
+ const struct gen_perf_registers *config,
+ const char *guid)
+{
+ struct drm_i915_perf_oa_config i915_config = { 0, };
+
+ memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));
+
+ i915_config.n_mux_regs = config->n_mux_regs;
+ i915_config.mux_regs_ptr = to_const_user_pointer(config->mux_regs);
+
+ i915_config.n_boolean_regs = config->n_b_counter_regs;
+ i915_config.boolean_regs_ptr = to_const_user_pointer(config->b_counter_regs);
+
+ i915_config.n_flex_regs = config->n_flex_regs;
+ i915_config.flex_regs_ptr = to_const_user_pointer(config->flex_regs);
+
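+   /* On success the ioctl returns the (positive) id of the newly added
+    * config; normalize failures to 0, which this code treats as an invalid
+    * config id.
+    */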
+ int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
+ return ret > 0 ? ret : 0;
+}
+
static void
-init_oa_configs(struct gen_perf_config *perf, int fd)
+init_oa_configs(struct gen_perf_config *perf, int fd,
+ const struct gen_device_info *devinfo)
{
hash_table_foreach(perf->oa_metrics_table, entry) {
const struct gen_perf_query_info *query = entry->data;
- struct drm_i915_perf_oa_config config;
uint64_t config_id;
- int ret;
if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
DBG("metric set: %s (already loaded)\n", query->guid);
- register_oa_config(perf, query, config_id);
+ register_oa_config(perf, devinfo, query, config_id);
continue;
}
- memset(&config, 0, sizeof(config));
-
- memcpy(config.uuid, query->guid, sizeof(config.uuid));
-
- config.n_mux_regs = query->n_mux_regs;
- config.mux_regs_ptr = (uintptr_t) query->mux_regs;
-
- config.n_boolean_regs = query->n_b_counter_regs;
- config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
-
- config.n_flex_regs = query->n_flex_regs;
- config.flex_regs_ptr = (uintptr_t) query->flex_regs;
-
- ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
+ int ret = i915_add_config(perf, fd, &query->config, query->guid);
if (ret < 0) {
DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
query->name, query->guid, strerror(errno));
continue;
}
- register_oa_config(perf, query, ret);
+ register_oa_config(perf, devinfo, query, ret);
DBG("metric set: %s (added)\n", query->guid);
}
}
{
uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
-   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
-      return false;
-   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
-      return false;
+   if (likely(!(INTEL_DEBUG & DEBUG_NO_OACONFIG))) {
+      if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
+         return false;
+
+      if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
+         return false;
+   } else {
+      min_freq_mhz = 300;
+      max_freq_mhz = 1000;
+   }
memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
}
if (devinfo->is_cannonlake)
return gen_oa_register_queries_cnl;
- if (devinfo->gen == 11)
+ if (devinfo->gen == 11) {
+ if (devinfo->is_elkhartlake)
+ return gen_oa_register_queries_lkf;
return gen_oa_register_queries_icl;
+ }
+ if (devinfo->gen == 12)
+ return gen_oa_register_queries_tgl;
return NULL;
}
-bool
-gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
- const struct gen_device_info *devinfo)
+static int
+gen_perf_compare_counter_names(const void *v1, const void *v2)
+{
+ const struct gen_perf_query_counter *c1 = v1;
+ const struct gen_perf_query_counter *c2 = v2;
+
+ return strcmp(c1->name, c2->name);
+}
+
+static void
+sort_query(struct gen_perf_query_info *q)
+{
+ qsort(q->counters, q->n_counters, sizeof(q->counters[0]),
+ gen_perf_compare_counter_names);
+}
+
+static void
+load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
+ const struct gen_device_info *devinfo)
+{
+ struct gen_perf_query_info *query =
+ gen_perf_append_query_info(perf_cfg, MAX_STAT_COUNTERS);
+
+ query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
+ query->name = "Pipeline Statistics Registers";
+
+ gen_perf_query_add_basic_stat_reg(query, IA_VERTICES_COUNT,
+ "N vertices submitted");
+ gen_perf_query_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
+ "N primitives submitted");
+ gen_perf_query_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
+ "N vertex shader invocations");
+
+ if (devinfo->gen == 6) {
+ gen_perf_query_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
+ "SO_PRIM_STORAGE_NEEDED",
+ "N geometry shader stream-out primitives (total)");
+ gen_perf_query_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
+ "SO_NUM_PRIMS_WRITTEN",
+ "N geometry shader stream-out primitives (written)");
+ } else {
+ gen_perf_query_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
+ "SO_PRIM_STORAGE_NEEDED (Stream 0)",
+ "N stream-out (stream 0) primitives (total)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
+ "SO_PRIM_STORAGE_NEEDED (Stream 1)",
+ "N stream-out (stream 1) primitives (total)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
+ "SO_PRIM_STORAGE_NEEDED (Stream 2)",
+ "N stream-out (stream 2) primitives (total)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
+ "SO_PRIM_STORAGE_NEEDED (Stream 3)",
+ "N stream-out (stream 3) primitives (total)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
+ "SO_NUM_PRIMS_WRITTEN (Stream 0)",
+ "N stream-out (stream 0) primitives (written)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
+ "SO_NUM_PRIMS_WRITTEN (Stream 1)",
+ "N stream-out (stream 1) primitives (written)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
+ "SO_NUM_PRIMS_WRITTEN (Stream 2)",
+ "N stream-out (stream 2) primitives (written)");
+ gen_perf_query_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
+ "SO_NUM_PRIMS_WRITTEN (Stream 3)",
+ "N stream-out (stream 3) primitives (written)");
+ }
+
+ gen_perf_query_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
+ "N TCS shader invocations");
+ gen_perf_query_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
+ "N TES shader invocations");
+
+ gen_perf_query_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
+ "N geometry shader invocations");
+ gen_perf_query_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
+ "N geometry shader primitives emitted");
+
+ gen_perf_query_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
+ "N primitives entering clipping");
+ gen_perf_query_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
+ "N primitives leaving clipping");
+
+ if (devinfo->is_haswell || devinfo->gen == 8) {
+ gen_perf_query_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
+ "N fragment shader invocations",
+ "N fragment shader invocations");
+ } else {
+ gen_perf_query_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
+ "N fragment shader invocations");
+ }
+
+ gen_perf_query_add_basic_stat_reg(query, PS_DEPTH_COUNT,
+ "N z-pass fragments");
+
+ if (devinfo->gen >= 7) {
+ gen_perf_query_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
+ "N compute shader invocations");
+ }
+
+ query->data_size = sizeof(uint64_t) * query->n_counters;
+
+ sort_query(query);
+}
+
+static int
+i915_perf_version(int drm_fd)
+{
+ int tmp;
+ drm_i915_getparam_t gp = {
+ .param = I915_PARAM_PERF_REVISION,
+ .value = &tmp,
+ };
+
+ int ret = gen_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+   /* Return 0 if this getparam is not supported; the oldest version the
+    * kernel can report is 1.
+    */
+ return ret < 0 ? 0 : tmp;
+}
+
+static void
+i915_get_sseu(int drm_fd, struct drm_i915_gem_context_param_sseu *sseu)
+{
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_SSEU,
+ .size = sizeof(*sseu),
+ .value = to_user_pointer(sseu)
+ };
+
+ gen_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+}
+
+static inline int
+compare_str_or_null(const char *s1, const char *s2)
+{
+ if (s1 == NULL && s2 == NULL)
+ return 0;
+ if (s1 == NULL)
+ return -1;
+ if (s2 == NULL)
+ return 1;
+
+ return strcmp(s1, s2);
+}
+
+static int
+compare_counter_categories_and_names(const void *_c1, const void *_c2)
+{
+ const struct gen_perf_query_counter_info *c1 = (const struct gen_perf_query_counter_info *)_c1;
+ const struct gen_perf_query_counter_info *c2 = (const struct gen_perf_query_counter_info *)_c2;
+
+ /* pipeline counters don't have an assigned category */
+ int r = compare_str_or_null(c1->counter->category, c2->counter->category);
+ if (r)
+ return r;
+
+ return strcmp(c1->counter->name, c2->counter->name);
+}
+
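+/* Build a deduplicated list of all the counters available across queries,
+ * remembering for each counter the bitmask of queries that expose it.
+ */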
+static void
+build_unique_counter_list(struct gen_perf_config *perf)
+{
+ assert(perf->n_queries < 64);
+
+ size_t max_counters = 0;
+
+ for (int q = 0; q < perf->n_queries; q++)
+ max_counters += perf->queries[q].n_counters;
+
+   /*
+    * Allocate an array big enough to hold the maximum possible number of
+    * counters. We can't start small and realloc as needed because the hash
+    * table below stores pointers into this array.
+    */
+ struct gen_perf_query_counter_info *counter_infos =
+ ralloc_array_size(perf, sizeof(counter_infos[0]), max_counters);
+
+ perf->n_counters = 0;
+
+ struct hash_table *counters_table =
+ _mesa_hash_table_create(perf,
+ _mesa_hash_string,
+ _mesa_key_string_equal);
+ struct hash_entry *entry;
+ for (int q = 0; q < perf->n_queries ; q++) {
+ struct gen_perf_query_info *query = &perf->queries[q];
+
+ for (int c = 0; c < query->n_counters; c++) {
+ struct gen_perf_query_counter *counter;
+ struct gen_perf_query_counter_info *counter_info;
+
+ counter = &query->counters[c];
+ entry = _mesa_hash_table_search(counters_table, counter->symbol_name);
+
+ if (entry) {
+ counter_info = entry->data;
+ counter_info->query_mask |= BITFIELD64_BIT(q);
+ continue;
+ }
+ assert(perf->n_counters < max_counters);
+
+ counter_info = &counter_infos[perf->n_counters++];
+ counter_info->counter = counter;
+ counter_info->query_mask = BITFIELD64_BIT(q);
+
+ counter_info->location.group_idx = q;
+ counter_info->location.counter_idx = c;
+
+ _mesa_hash_table_insert(counters_table, counter->symbol_name, counter_info);
+ }
+ }
+
+ _mesa_hash_table_destroy(counters_table, NULL);
+
+   /* Now that the hash table (and its pointers into counter_infos) is gone,
+    * the array can safely be shrunk to the actual number of counters. */
+ perf->counter_infos = reralloc_array_size(perf, counter_infos,
+ sizeof(counter_infos[0]), perf->n_counters);
+
+ qsort(perf->counter_infos, perf->n_counters, sizeof(perf->counter_infos[0]),
+ compare_counter_categories_and_names);
+}
+
+static bool
+oa_metrics_available(struct gen_perf_config *perf, int fd,
+ const struct gen_device_info *devinfo)
{
perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
bool i915_perf_oa_available = false;
struct stat sb;
+ perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
+ perf->i915_perf_version = i915_perf_version(fd);
+
+ /* Record the default SSEU configuration. */
+ i915_get_sseu(fd, &perf->sseu);
+
/* The existence of this sysctl parameter implies the kernel supports
* the i915 perf interface.
*/
if (paranoid == 0 || geteuid() == 0)
i915_perf_oa_available = true;
}
+
+ perf->platform_supported = oa_register != NULL;
}
- if (!i915_perf_oa_available ||
- !oa_register ||
- !get_sysfs_dev_dir(perf, fd) ||
- !init_oa_sys_vars(perf, devinfo))
- return false;
+ return i915_perf_oa_available &&
+ oa_register &&
+ get_sysfs_dev_dir(perf, fd) &&
+ init_oa_sys_vars(perf, devinfo);
+}
+
+static void
+load_oa_metrics(struct gen_perf_config *perf, int fd,
+ const struct gen_device_info *devinfo)
+{
+ int existing_queries = perf->n_queries;
+
+ perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
perf->oa_metrics_table =
- _mesa_hash_table_create(perf, _mesa_key_hash_string,
+ _mesa_hash_table_create(perf, _mesa_hash_string,
_mesa_key_string_equal);
/* Index all the metric sets mesa knows about before looking to see what
*/
oa_register(perf);
-   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
-       kernel_has_dynamic_config_support(perf, fd))
-      init_oa_configs(perf, fd);
-   else
-      enumerate_sysfs_metrics(perf);
-   return true;
+   if (likely(!(INTEL_DEBUG & DEBUG_NO_OACONFIG))) {
+      if (kernel_has_dynamic_config_support(perf, fd))
+         init_oa_configs(perf, fd, devinfo);
+      else
+         enumerate_sysfs_metrics(perf, devinfo);
+   } else {
+      add_all_metrics(perf, devinfo);
+   }
+
+   /* Sort the counters of each group created by this function by name. */
+   for (int i = existing_queries; i < perf->n_queries; ++i)
+      sort_query(&perf->queries[i]);
+
+   /* Select a fallback OA metric. Look for the TestOa metric, or use the
+    * last one if it is not present (on HSW).
+    */
+ for (int i = existing_queries; i < perf->n_queries; i++) {
+ if (perf->queries[i].symbol_name &&
+ strcmp(perf->queries[i].symbol_name, "TestOa") == 0) {
+ perf->fallback_raw_oa_metric = perf->queries[i].oa_metrics_set_id;
+ break;
+ }
+ }
+ if (perf->fallback_raw_oa_metric == 0)
+ perf->fallback_raw_oa_metric = perf->queries[perf->n_queries - 1].oa_metrics_set_id;
+}
+
+struct gen_perf_registers *
+gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
+{
+ if (!perf_cfg->i915_query_supported)
+ return NULL;
+
+ struct drm_i915_perf_oa_config i915_config = { 0, };
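+   /* A first query with zeroed register counts only fills in the number of
+    * registers of each type in the config.
+    */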
+ if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
+ return NULL;
+
+ struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
+ config->n_flex_regs = i915_config.n_flex_regs;
+ config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
+ config->n_mux_regs = i915_config.n_mux_regs;
+ config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
+ config->n_b_counter_regs = i915_config.n_boolean_regs;
+ config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);
+
+ /*
+ * struct gen_perf_query_register_prog maps exactly to the tuple of
+ * (register offset, register value) returned by the i915.
+ */
+ i915_config.flex_regs_ptr = to_const_user_pointer(config->flex_regs);
+ i915_config.mux_regs_ptr = to_const_user_pointer(config->mux_regs);
+ i915_config.boolean_regs_ptr = to_const_user_pointer(config->b_counter_regs);
+ if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
+ ralloc_free(config);
+ return NULL;
+ }
+
+ return config;
+}
+
+uint64_t
+gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
+ const struct gen_perf_registers *config,
+ const char *guid)
+{
+ if (guid)
+ return i915_add_config(perf_cfg, fd, config, guid);
+
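+   /* Without a GUID, derive a stable one by hashing the register
+    * programming, so that storing an identical config again reuses the
+    * already loaded kernel config instead of adding a duplicate.
+    */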
+ struct mesa_sha1 sha1_ctx;
+ _mesa_sha1_init(&sha1_ctx);
+
+ if (config->flex_regs) {
+ _mesa_sha1_update(&sha1_ctx, config->flex_regs,
+ sizeof(config->flex_regs[0]) *
+ config->n_flex_regs);
+ }
+ if (config->mux_regs) {
+ _mesa_sha1_update(&sha1_ctx, config->mux_regs,
+ sizeof(config->mux_regs[0]) *
+ config->n_mux_regs);
+ }
+ if (config->b_counter_regs) {
+ _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
+ sizeof(config->b_counter_regs[0]) *
+ config->n_b_counter_regs);
+ }
+
+ uint8_t hash[20];
+ _mesa_sha1_final(&sha1_ctx, hash);
+
+ char formatted_hash[41];
+ _mesa_sha1_format(formatted_hash, hash);
+
+ char generated_guid[37];
+ snprintf(generated_guid, sizeof(generated_guid),
+ "%.8s-%.4s-%.4s-%.4s-%.12s",
+ &formatted_hash[0], &formatted_hash[8],
+ &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
+ &formatted_hash[8 + 4 + 4 + 4]);
+
+ /* Check if already present. */
+ uint64_t id;
+ if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
+ return id;
+
+ return i915_add_config(perf_cfg, fd, config, generated_guid);
+}
+
+static uint64_t
+get_passes_mask(struct gen_perf_config *perf,
+ const uint32_t *counter_indices,
+ uint32_t counter_indices_count)
+{
+ uint64_t queries_mask = 0;
+
+ assert(perf->n_queries < 64);
+
+ /* Compute the number of passes by going through all counters N times (with
+ * N the number of queries) to make sure we select the most constraining
+ * counters first and look at the more flexible ones (that could be
+ * obtained from multiple queries) later. That way we minimize the number
+ * of passes required.
+ */
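+   /* For example (hypothetical masks): a counter available only in query 0
+    * (mask 0x1) is handled on the first iteration (q=0) and selects query 0;
+    * a counter available in queries 0 and 2 (mask 0x5) is only considered on
+    * the second iteration (q=1) and skipped, because the already selected
+    * query 0 covers it. A single pass then suffices for both counters.
+    */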
+ for (uint32_t q = 0; q < perf->n_queries; q++) {
+ for (uint32_t i = 0; i < counter_indices_count; i++) {
+ assert(counter_indices[i] < perf->n_counters);
+
+ uint32_t idx = counter_indices[i];
+         if (__builtin_popcountll(perf->counter_infos[idx].query_mask) != (q + 1))
+ continue;
+
+ if (queries_mask & perf->counter_infos[idx].query_mask)
+ continue;
+
+ queries_mask |= BITFIELD64_BIT(ffsll(perf->counter_infos[idx].query_mask) - 1);
+ }
+ }
+
+ return queries_mask;
+}
+
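+/* Compute the number of passes required to read all the given counters. If
+ * @pass_queries is not NULL, it is filled with the query to use for each
+ * pass; callers can invoke this once with NULL to learn how many entries
+ * @pass_queries needs.
+ */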
+uint32_t
+gen_perf_get_n_passes(struct gen_perf_config *perf,
+ const uint32_t *counter_indices,
+ uint32_t counter_indices_count,
+ struct gen_perf_query_info **pass_queries)
+{
+ uint64_t queries_mask = get_passes_mask(perf, counter_indices, counter_indices_count);
+
+ if (pass_queries) {
+ uint32_t pass = 0;
+ for (uint32_t q = 0; q < perf->n_queries; q++) {
+ if ((1ULL << q) & queries_mask)
+ pass_queries[pass++] = &perf->queries[q];
+ }
+ }
+
+   return __builtin_popcountll(queries_mask);
+}
+
+void
+gen_perf_get_counters_passes(struct gen_perf_config *perf,
+ const uint32_t *counter_indices,
+ uint32_t counter_indices_count,
+ struct gen_perf_counter_pass *counter_pass)
+{
+ uint64_t queries_mask = get_passes_mask(perf, counter_indices, counter_indices_count);
+   ASSERTED uint32_t n_passes = __builtin_popcountll(queries_mask);
+
+ for (uint32_t i = 0; i < counter_indices_count; i++) {
+ assert(counter_indices[i] < perf->n_counters);
+
+ uint32_t idx = counter_indices[i];
+ counter_pass[i].counter = perf->counter_infos[idx].counter;
+
+ uint32_t query_idx = ffsll(perf->counter_infos[idx].query_mask & queries_mask) - 1;
+ counter_pass[i].query = &perf->queries[query_idx];
+
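+      /* The pass index of this counter is the number of selected queries at
+       * or below query_idx; shifting the higher bits out of queries_mask
+       * leaves exactly those.
+       */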
+ uint32_t clear_bits = 63 - query_idx;
+      counter_pass[i].pass = __builtin_popcountll((queries_mask << clear_bits) >> clear_bits) - 1;
+ assert(counter_pass[i].pass < n_passes);
+ }
}
/* Accumulate 32bits OA counters */
const uint32_t *start,
const uint32_t *end)
{
- int i, idx = 0;
+ int i;
- result->hw_id = start[2];
+ if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
+ start[2] != OA_REPORT_INVALID_CTX_ID)
+ result->hw_id = start[2];
+ if (result->reports_accumulated == 0)
+ result->begin_timestamp = start[1];
result->reports_accumulated++;
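+
+   /* Accumulate the delta of each counter between the two reports. In the
+    * A32u40_A4u32_B8_C8 format (gen8+) a report lays out as: dword 1 the
+    * timestamp, dword 3 the gpu clock, 32x 40bit A counters, 4x 32bit A
+    * counters at dword 36, 8x 32bit B counters at dword 48 and 8x 32bit C
+    * counters at dword 56.
+    */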
switch (query->oa_format) {
case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
- accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
- accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
+ accumulate_uint32(start + 1, end + 1,
+ result->accumulator + query->gpu_time_offset); /* timestamp */
+ accumulate_uint32(start + 3, end + 3,
+ result->accumulator + query->gpu_clock_offset); /* clock */
/* 32x 40bit A counters... */
- for (i = 0; i < 32; i++)
- accumulate_uint40(i, start, end, result->accumulator + idx++);
+ for (i = 0; i < 32; i++) {
+ accumulate_uint40(i, start, end,
+ result->accumulator + query->a_offset + i);
+ }
/* 4x 32bit A counters... */
- for (i = 0; i < 4; i++)
- accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
+ for (i = 0; i < 4; i++) {
+ accumulate_uint32(start + 36 + i, end + 36 + i,
+ result->accumulator + query->a_offset + 32 + i);
+ }
+
+ /* 8x 32bit B counters */
+ for (i = 0; i < 8; i++) {
+ accumulate_uint32(start + 48 + i, end + 48 + i,
+ result->accumulator + query->b_offset + i);
+ }
- /* 8x 32bit B counters + 8x 32bit C counters... */
- for (i = 0; i < 16; i++)
- accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
+ /* 8x 32bit C counters... */
+ for (i = 0; i < 8; i++) {
+ accumulate_uint32(start + 56 + i, end + 56 + i,
+ result->accumulator + query->c_offset + i);
+ }
break;
case I915_OA_FORMAT_A45_B8_C8:
accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
- for (i = 0; i < 61; i++)
- accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
+ for (i = 0; i < 61; i++) {
+ accumulate_uint32(start + 3 + i, end + 3 + i,
+ result->accumulator + query->a_offset + i);
+ }
break;
default:
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
memset(result, 0, sizeof(*result));
- result->hw_id = 0xffffffff; /* invalid */
-}
-
-static void
-fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
- const char *name,
- uint32_t data_offset,
- uint32_t data_size,
- enum gen_perf_counter_data_type data_type)
-{
- struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
-
- assert(query->n_counters <= query->max_counters);
-
- counter->name = name;
- counter->desc = "Raw counter value";
- counter->type = GEN_PERF_COUNTER_TYPE_RAW;
- counter->data_type = data_type;
- counter->offset = data_offset;
-
- query->n_counters++;
-
- assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
-}
-
-#define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
- fill_mdapi_perf_query_counter(query, #field_name, \
- (uint8_t *) &struct_name.field_name - \
- (uint8_t *) &struct_name, \
- sizeof(struct_name.field_name), \
- GEN_PERF_COUNTER_DATA_TYPE_##type_name)
-#define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
- fill_mdapi_perf_query_counter(query, \
- ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
- (uint8_t *) &struct_name.field_name[idx] - \
- (uint8_t *) &struct_name, \
- sizeof(struct_name.field_name[0]), \
- GEN_PERF_COUNTER_DATA_TYPE_##type_name)
-
-void
-gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
- struct gen_perf_config *perf)
-{
- struct gen_perf_query_info *query = NULL;
-
- /* MDAPI requires different structures for pretty much every generation
- * (right now we have definitions for gen 7 to 11).
- */
- if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
- return;
-
- switch (devinfo->gen) {
- case 7: {
- query = gen_perf_query_append_query_info(perf, 1 + 45 + 16 + 7);
- query->oa_format = I915_OA_FORMAT_A45_B8_C8;
-
- struct gen7_mdapi_metrics metric_data;
- query->data_size = sizeof(metric_data);
-
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
- for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, ACounters, i, UINT64);
- }
- for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, NOACounters, i, UINT64);
- }
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
- break;
- }
- case 8: {
- query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16);
- query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
-
- struct gen8_mdapi_metrics metric_data;
- query->data_size = sizeof(metric_data);
-
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
- for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, OaCntr, i, UINT64);
- }
- for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, NoaCntr, i, UINT64);
- }
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
- break;
- }
- case 9:
- case 10:
- case 11: {
- query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
- query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
-
- struct gen9_mdapi_metrics metric_data;
- query->data_size = sizeof(metric_data);
-
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
- for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, OaCntr, i, UINT64);
- }
- for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, NoaCntr, i, UINT64);
- }
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
- for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
- MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
- metric_data, UserCntr, i, UINT64);
- }
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
- MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
- break;
- }
- default:
- unreachable("Unsupported gen");
- break;
- }
-
- query->kind = GEN_PERF_QUERY_TYPE_RAW;
- query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
- query->guid = GEN_PERF_QUERY_GUID_MDAPI;
-
- {
- /* Accumulation buffer offsets copied from an actual query... */
- const struct gen_perf_query_info *copy_query =
- &perf->queries[0];
-
- query->gpu_time_offset = copy_query->gpu_time_offset;
- query->gpu_clock_offset = copy_query->gpu_clock_offset;
- query->a_offset = copy_query->a_offset;
- query->b_offset = copy_query->b_offset;
- query->c_offset = copy_query->c_offset;
- }
+ result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
}
-void
-gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
- struct gen_perf_config *perf)
+static int
+gen_perf_compare_query_names(const void *v1, const void *v2)
{
- if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
- return;
-
- struct gen_perf_query_info *query =
- gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
-
- query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
- query->name = "Intel_Raw_Pipeline_Statistics_Query";
-
- /* The order has to match mdapi_pipeline_metrics. */
- gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
- "N vertices submitted");
- gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
- "N primitives submitted");
- gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
- "N vertex shader invocations");
- gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
- "N geometry shader invocations");
- gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
- "N geometry shader primitives emitted");
- gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
- "N primitives entering clipping");
- gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
- "N primitives leaving clipping");
- if (devinfo->is_haswell || devinfo->gen == 8) {
- gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
- "N fragment shader invocations",
- "N fragment shader invocations");
- } else {
- gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
- "N fragment shader invocations");
- }
- gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
- "N TCS shader invocations");
- gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
- "N TES shader invocations");
- if (devinfo->gen >= 7) {
- gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
- "N compute shader invocations");
- }
+ const struct gen_perf_query_info *q1 = v1;
+ const struct gen_perf_query_info *q2 = v2;
- if (devinfo->gen >= 10) {
- /* Reuse existing CS invocation register until we can expose this new
- * one.
- */
- gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
- "Reserved1");
- }
-
- query->data_size = sizeof(uint64_t) * query->n_counters;
-}
-
-uint64_t
-gen_perf_query_get_metric_id(struct gen_perf_config *perf,
- const struct gen_perf_query_info *query)
-{
- /* These queries are know not to ever change, their config ID has been
- * loaded upon the first query creation. No need to look them up again.
- */
- if (query->kind == GEN_PERF_QUERY_TYPE_OA)
- return query->oa_metrics_set_id;
-
- assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
-
- /* Raw queries can be reprogrammed up by an external application/library.
- * When a raw query is used for the first time it's id is set to a value !=
- * 0. When it stops being used the id returns to 0. No need to reload the
- * ID when it's already loaded.
- */
- if (query->oa_metrics_set_id != 0) {
- DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
- query->name, query->guid, query->oa_metrics_set_id);
- return query->oa_metrics_set_id;
- }
-
- struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
- if (!gen_perf_load_metric_id(perf, query->guid,
- &raw_query->oa_metrics_set_id)) {
- DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
- raw_query->oa_metrics_set_id = 1ULL;
- } else {
- DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
- query->name, query->guid, query->oa_metrics_set_id);
- }
- return query->oa_metrics_set_id;
-}
-
-struct oa_sample_buf *
-gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx)
-{
- struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
- struct oa_sample_buf *buf;
-
- if (node)
- buf = exec_node_data(struct oa_sample_buf, node, link);
- else {
- buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
-
- exec_node_init(&buf->link);
- buf->refcount = 0;
- buf->len = 0;
- }
-
- return buf;
+ return strcmp(q1->name, q2->name);
}
void
-gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
-{
- struct exec_node *tail_node =
- exec_list_get_tail(&perf_ctx->sample_buffers);
- struct oa_sample_buf *tail_buf =
- exec_node_data(struct oa_sample_buf, tail_node, link);
-
- /* Remove all old, unreferenced sample buffers walking forward from
- * the head of the list, except always leave at least one node in
- * the list so we always have a node to reference when we Begin
- * a new query.
- */
- foreach_list_typed_safe(struct oa_sample_buf, buf, link,
- &perf_ctx->sample_buffers)
- {
- if (buf->refcount == 0 && buf != tail_buf) {
- exec_node_remove(&buf->link);
- exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
- } else
- return;
- }
-}
-
-void
-gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx)
-{
- foreach_list_typed_safe(struct oa_sample_buf, buf, link,
- &perf_ctx->free_sample_buffers)
- ralloc_free(buf);
-
- exec_list_make_empty(&perf_ctx->free_sample_buffers);
-}
-
-/******************************************************************************/
-
-/**
- * Emit MI_STORE_REGISTER_MEM commands to capture all of the
- * pipeline statistics for the performance query object.
- */
-void
-gen_perf_snapshot_statistics_registers(void *context,
- struct gen_perf_config *perf,
- struct gen_perf_query_object *obj,
- uint32_t offset_in_bytes)
-{
- const struct gen_perf_query_info *query = obj->queryinfo;
- const int n_counters = query->n_counters;
-
- for (int i = 0; i < n_counters; i++) {
- const struct gen_perf_query_counter *counter = &query->counters[i];
-
- assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
-
- perf->vtbl.store_register_mem64(context, obj->pipeline_stats.bo,
- counter->pipeline_stat.reg,
- offset_in_bytes + i * sizeof(uint64_t));
- }
-}
-
-void
-gen_perf_close(struct gen_perf_context *perfquery,
- const struct gen_perf_query_info *query)
-{
- if (perfquery->oa_stream_fd != -1) {
- close(perfquery->oa_stream_fd);
- perfquery->oa_stream_fd = -1;
- }
- if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
- struct gen_perf_query_info *raw_query =
- (struct gen_perf_query_info *) query;
- raw_query->oa_metrics_set_id = 0;
- }
-}
-
-bool
-gen_perf_open(struct gen_perf_context *perf_ctx,
- int metrics_set_id,
- int report_format,
- int period_exponent,
- int drm_fd,
- uint32_t ctx_id)
-{
- uint64_t properties[] = {
- /* Single context sampling */
- DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
-
- /* Include OA reports in samples */
- DRM_I915_PERF_PROP_SAMPLE_OA, true,
-
- /* OA unit configuration */
- DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
- DRM_I915_PERF_PROP_OA_FORMAT, report_format,
- DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
- };
- struct drm_i915_perf_open_param param = {
- .flags = I915_PERF_FLAG_FD_CLOEXEC |
- I915_PERF_FLAG_FD_NONBLOCK |
- I915_PERF_FLAG_DISABLED,
- .num_properties = ARRAY_SIZE(properties) / 2,
- .properties_ptr = (uintptr_t) properties,
- };
- int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, ¶m);
- if (fd == -1) {
- DBG("Error opening gen perf OA stream: %m\n");
- return false;
- }
-
- perf_ctx->oa_stream_fd = fd;
-
- perf_ctx->current_oa_metrics_set_id = metrics_set_id;
- perf_ctx->current_oa_format = report_format;
-
- return true;
-}
-
-bool
-gen_perf_inc_n_users(struct gen_perf_context *perf_ctx)
-{
- if (perf_ctx->n_oa_users == 0 &&
- gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
- {
- return false;
- }
- ++perf_ctx->n_oa_users;
-
- return true;
-}
-
-void
-gen_perf_dec_n_users(struct gen_perf_context *perf_ctx)
-{
- /* Disabling the i915 perf stream will effectively disable the OA
- * counters. Note it's important to be sure there are no outstanding
- * MI_RPC commands at this point since they could stall the CS
- * indefinitely once OACONTROL is disabled.
- */
- --perf_ctx->n_oa_users;
- if (perf_ctx->n_oa_users == 0 &&
- gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
- {
- DBG("WARNING: Error disabling gen perf stream: %m\n");
- }
-}
-
-void
-gen_perf_init_context(struct gen_perf_context *perf_ctx,
- struct gen_perf_config *perf_cfg,
- void * ctx, /* driver context (eg, brw_context) */
- void * bufmgr, /* eg brw_bufmgr */
+gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
const struct gen_device_info *devinfo,
- uint32_t hw_ctx,
- int drm_fd)
+ int drm_fd,
+ bool include_pipeline_statistics)
{
- perf_ctx->perf = perf_cfg;
- perf_ctx->ctx = ctx;
- perf_ctx->bufmgr = bufmgr;
- perf_ctx->drm_fd = drm_fd;
- perf_ctx->hw_ctx = hw_ctx;
- perf_ctx->devinfo = devinfo;
-
- perf_ctx->unaccumulated =
- ralloc_array(ctx, struct gen_perf_query_object *, 2);
- perf_ctx->unaccumulated_elements = 0;
- perf_ctx->unaccumulated_array_size = 2;
-
- exec_list_make_empty(&perf_ctx->sample_buffers);
- exec_list_make_empty(&perf_ctx->free_sample_buffers);
-
- /* It's convenient to guarantee that this linked list of sample
- * buffers is never empty so we add an empty head so when we
- * Begin an OA query we can always take a reference on a buffer
- * in this list.
- */
- struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
- exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
-
- perf_ctx->oa_stream_fd = -1;
- perf_ctx->next_query_start_report_id = 1000;
-}
-
-/**
- * Add a query to the global list of "unaccumulated queries."
- *
- * Queries are tracked here until all the associated OA reports have
- * been accumulated via accumulate_oa_reports() after the end
- * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
- */
-static void
-add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *obj)
-{
- if (perf_ctx->unaccumulated_elements >=
- perf_ctx->unaccumulated_array_size)
- {
- perf_ctx->unaccumulated_array_size *= 1.5;
- perf_ctx->unaccumulated =
- reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
- struct gen_perf_query_object *,
- perf_ctx->unaccumulated_array_size);
+ if (include_pipeline_statistics) {
+ load_pipeline_statistic_metrics(perf_cfg, devinfo);
+ gen_perf_register_mdapi_statistic_query(perf_cfg, devinfo);
}
- perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
-}
+ bool oa_metrics = oa_metrics_available(perf_cfg, drm_fd, devinfo);
+ if (oa_metrics)
+ load_oa_metrics(perf_cfg, drm_fd, devinfo);
-bool
-gen_perf_begin_query(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *query)
-{
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
- const struct gen_perf_query_info *queryinfo = query->queryinfo;
-
- /* XXX: We have to consider that the command parser unit that parses batch
- * buffer commands and is used to capture begin/end counter snapshots isn't
- * implicitly synchronized with what's currently running across other GPU
- * units (such as the EUs running shaders) that the performance counters are
- * associated with.
- *
- * The intention of performance queries is to measure the work associated
- * with commands between the begin/end delimiters and so for that to be the
- * case we need to explicitly synchronize the parsing of commands to capture
- * Begin/End counter snapshots with what's running across other parts of the
- * GPU.
- *
- * When the command parser reaches a Begin marker it effectively needs to
- * drain everything currently running on the GPU until the hardware is idle
- * before capturing the first snapshot of counters - otherwise the results
- * would also be measuring the effects of earlier commands.
- *
- * When the command parser reaches an End marker it needs to stall until
- * everything currently running on the GPU has finished before capturing the
- * end snapshot - otherwise the results won't be a complete representation
- * of the work.
- *
- * Theoretically there could be opportunities to minimize how much of the
- * GPU pipeline is drained, or that we stall for, when we know what specific
- * units the performance counters being queried relate to but we don't
- * currently attempt to be clever here.
- *
- * Note: with our current simple approach here then for back-to-back queries
- * we will redundantly emit duplicate commands to synchronize the command
- * streamer with the rest of the GPU pipeline, but we assume that in HW the
- * second synchronization is effectively a NOOP.
- *
- * N.B. The final results are based on deltas of counters between (inside)
- * Begin/End markers so even though the total wall clock time of the
- * workload is stretched by larger pipeline bubbles the bubbles themselves
- * are generally invisible to the query results. Whether that's a good or a
- * bad thing depends on the use case. For a lower real-time impact while
- * capturing metrics then periodic sampling may be a better choice than
- * INTEL_performance_query.
- *
- *
- * This is our Begin synchronization point to drain current work on the
- * GPU before we capture our first counter snapshot...
- */
- perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
-
- switch (queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW: {
+ /* sort query groups by name */
+ qsort(perf_cfg->queries, perf_cfg->n_queries,
+ sizeof(perf_cfg->queries[0]), gen_perf_compare_query_names);
- /* Opening an i915 perf stream implies exclusive access to the OA unit
- * which will generate counter reports for a specific counter set with a
- * specific layout/format so we can't begin any OA based queries that
- * require a different counter set or format unless we get an opportunity
- * to close the stream and open a new one...
- */
- uint64_t metric_id = gen_perf_query_get_metric_id(perf_ctx->perf, queryinfo);
+ build_unique_counter_list(perf_cfg);
- if (perf_ctx->oa_stream_fd != -1 &&
- perf_ctx->current_oa_metrics_set_id != metric_id) {
-
- if (perf_ctx->n_oa_users != 0) {
- DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
- perf_ctx->current_oa_metrics_set_id, metric_id);
- return false;
- } else
- gen_perf_close(perf_ctx, queryinfo);
- }
-
- /* If the OA counters aren't already on, enable them. */
- if (perf_ctx->oa_stream_fd == -1) {
- const struct gen_device_info *devinfo = perf_ctx->devinfo;
-
- /* The period_exponent gives a sampling period as follows:
- * sample_period = timestamp_period * 2^(period_exponent + 1)
- *
- * The timestamps increments every 80ns (HSW), ~52ns (GEN9LP) or
- * ~83ns (GEN8/9).
- *
- * The counter overflow period is derived from the EuActive counter
- * which reads a counter that increments by the number of clock
- * cycles multiplied by the number of EUs. It can be calculated as:
- *
- * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
- *
- * (E.g. 40 EUs @ 1GHz = ~53ms)
- *
- * We select a sampling period inferior to that overflow period to
- * ensure we cannot see more than 1 counter overflow, otherwise we
- * could loose information.
- */
-
- int a_counter_in_bits = 32;
- if (devinfo->gen >= 8)
- a_counter_in_bits = 40;
-
- uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
- /* drop 1GHz freq to have units in nanoseconds */
- 2);
-
- DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
- overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
-
- int period_exponent = 0;
- uint64_t prev_sample_period, next_sample_period;
- for (int e = 0; e < 30; e++) {
- prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
- next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
-
- /* Take the previous sampling period, lower than the overflow
- * period.
- */
- if (prev_sample_period < overflow_period &&
- next_sample_period > overflow_period)
- period_exponent = e + 1;
- }
-
- if (period_exponent == 0) {
- DBG("WARNING: enable to find a sampling exponent\n");
- return false;
- }
-
- DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
- prev_sample_period / 1000000ul);
-
- if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
- period_exponent, perf_ctx->drm_fd,
- perf_ctx->hw_ctx))
- return false;
- } else {
- assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
- perf_ctx->current_oa_format == queryinfo->oa_format);
- }
-
- if (!gen_perf_inc_n_users(perf_ctx)) {
- DBG("WARNING: Error enabling i915 perf stream: %m\n");
- return false;
- }
-
- if (query->oa.bo) {
- perf_cfg->vtbl.bo_unreference(query->oa.bo);
- query->oa.bo = NULL;
- }
-
- query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
- "perf. query OA MI_RPC bo",
- MI_RPC_BO_SIZE);
-#ifdef DEBUG
- /* Pre-filling the BO helps debug whether writes landed. */
- void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
- memset(map, 0x80, MI_RPC_BO_SIZE);
- perf_cfg->vtbl.bo_unmap(query->oa.bo);
-#endif
-
- query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
- perf_ctx->next_query_start_report_id += 2;
-
- /* We flush the batchbuffer here to minimize the chances that MI_RPC
- * delimiting commands end up in different batchbuffers. If that's the
- * case, the measurement will include the time it takes for the kernel
- * scheduler to load a new request into the hardware. This is manifested in
- * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
- */
- perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
-
- /* Take a starting OA counter snapshot. */
- perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
- query->oa.begin_report_id);
- perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
- MI_FREQ_START_OFFSET_BYTES);
-
- ++perf_ctx->n_active_oa_queries;
-
- /* No already-buffered samples can possibly be associated with this query
- * so create a marker within the list of sample buffers enabling us to
- * easily ignore earlier samples when processing this query after
- * completion.
- */
- assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
- query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
-
- struct oa_sample_buf *buf =
- exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
-
- /* This reference will ensure that future/following sample
- * buffers (that may relate to this query) can't be freed until
- * this drops to zero.
- */
- buf->refcount++;
-
- gen_perf_query_result_clear(&query->oa.result);
- query->oa.results_accumulated = false;
-
- add_to_unaccumulated_query_list(perf_ctx, query);
- break;
- }
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- if (query->pipeline_stats.bo) {
- perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
- query->pipeline_stats.bo = NULL;
- }
-
- query->pipeline_stats.bo =
- perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
- "perf. query pipeline stats bo",
- STATS_BO_SIZE);
-
- /* Take starting snapshots. */
- gen_perf_snapshot_statistics_registers(perf_ctx->ctx , perf_cfg, query, 0);
-
- ++perf_ctx->n_active_pipeline_stats_queries;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- return true;
-}
-
-void
-gen_perf_end_query(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *query)
-{
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
-
- /* Ensure that the work associated with the queried commands will have
- * finished before taking our query end counter readings.
- *
- * For more details see comment in brw_begin_perf_query for
- * corresponding flush.
- */
- perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
-
- switch (query->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
-
- /* NB: It's possible that the query will have already been marked
- * as 'accumulated' if an error was seen while reading samples
- * from perf. In this case we mustn't try and emit a closing
- * MI_RPC command in case the OA unit has already been disabled
- */
- if (!query->oa.results_accumulated) {
- /* Take an ending OA counter snapshot. */
- perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
- MI_FREQ_END_OFFSET_BYTES);
- perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
- MI_RPC_BO_END_OFFSET_BYTES,
- query->oa.begin_report_id + 1);
- }
-
- --perf_ctx->n_active_oa_queries;
-
- /* NB: even though the query has now ended, it can't be accumulated
- * until the end MI_REPORT_PERF_COUNT snapshot has been written
- * to query->oa.bo
- */
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- gen_perf_snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query,
- STATS_BO_END_OFFSET_BYTES);
- --perf_ctx->n_active_pipeline_stats_queries;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-}
-
-enum OaReadStatus {
- OA_READ_STATUS_ERROR,
- OA_READ_STATUS_UNFINISHED,
- OA_READ_STATUS_FINISHED,
-};
-
-static enum OaReadStatus
-read_oa_samples_until(struct gen_perf_context *perf_ctx,
- uint32_t start_timestamp,
- uint32_t end_timestamp)
-{
- struct exec_node *tail_node =
- exec_list_get_tail(&perf_ctx->sample_buffers);
- struct oa_sample_buf *tail_buf =
- exec_node_data(struct oa_sample_buf, tail_node, link);
- uint32_t last_timestamp = tail_buf->last_timestamp;
-
- while (1) {
- struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
- uint32_t offset;
- int len;
-
- while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
- sizeof(buf->buf))) < 0 && errno == EINTR)
- ;
-
- if (len <= 0) {
- exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
-
- if (len < 0) {
- if (errno == EAGAIN)
- return ((last_timestamp - start_timestamp) >=
- (end_timestamp - start_timestamp)) ?
- OA_READ_STATUS_FINISHED :
- OA_READ_STATUS_UNFINISHED;
- else {
- DBG("Error reading i915 perf samples: %m\n");
- }
- } else
- DBG("Spurious EOF reading i915 perf samples\n");
-
- return OA_READ_STATUS_ERROR;
- }
-
- buf->len = len;
- exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
-
- /* Go through the reports and update the last timestamp. */
- offset = 0;
- while (offset < buf->len) {
- const struct drm_i915_perf_record_header *header =
- (const struct drm_i915_perf_record_header *) &buf->buf[offset];
- uint32_t *report = (uint32_t *) (header + 1);
-
- if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
- last_timestamp = report[1];
-
- offset += header->size;
- }
-
- buf->last_timestamp = last_timestamp;
- }
-
- unreachable("not reached");
- return OA_READ_STATUS_ERROR;
-}
-
-/**
- * Try to read all the reports until either the delimiting timestamp
- * or an error arises.
- */
-static bool
-read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *query,
- void *current_batch)
-{
- uint32_t *start;
- uint32_t *last;
- uint32_t *end;
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
-
- /* We need the MI_REPORT_PERF_COUNT to land before we can start
- * accumulate. */
- assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
- !perf_cfg->vtbl.bo_busy(query->oa.bo));
-
- /* Map the BO once here and let accumulate_oa_reports() unmap
- * it. */
- if (query->oa.map == NULL)
- query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
-
- start = last = query->oa.map;
- end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- if (start[0] != query->oa.begin_report_id) {
- DBG("Spurious start report id=%"PRIu32"\n", start[0]);
- return true;
- }
- if (end[0] != (query->oa.begin_report_id + 1)) {
- DBG("Spurious end report id=%"PRIu32"\n", end[0]);
- return true;
- }
-
- /* Read the reports until the end timestamp. */
- switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
- case OA_READ_STATUS_ERROR:
- /* Fallthrough and let accumulate_oa_reports() deal with the
- * error. */
- case OA_READ_STATUS_FINISHED:
- return true;
- case OA_READ_STATUS_UNFINISHED:
- return false;
- }
-
- unreachable("invalid read status");
- return false;
-}
-
-void
-gen_perf_wait_query(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *query,
- void *current_batch)
-{
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
- struct brw_bo *bo = NULL;
-
- switch (query->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- bo = query->oa.bo;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- bo = query->pipeline_stats.bo;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- if (bo == NULL)
- return;
-
- /* If the current batch references our results bo then we need to
- * flush first...
- */
- if (perf_cfg->vtbl.batch_references(current_batch, bo))
- perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
-
- perf_cfg->vtbl.bo_wait_rendering(bo);
-
- /* Due to a race condition between the OA unit signaling report
- * availability and the report actually being written into memory,
- * we need to wait for all the reports to come in before we can
- * read them.
- */
- if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
- query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
- while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
- ;
- }
-}
-
-bool
-gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
- struct gen_perf_query_object *query,
- void *current_batch)
-{
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
-
- switch (query->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- return (query->oa.results_accumulated ||
- (query->oa.bo &&
- !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
- !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
- read_oa_samples_for_query(perf_ctx, query, current_batch)));
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- return (query->pipeline_stats.bo &&
- !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
- !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- return false;
+ if (oa_metrics)
+ gen_perf_register_mdapi_oa_query(perf_cfg, devinfo);
}