i965: Drop assert about number of uniforms in ARB handling.
diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index dd392b1960cfc887b71d46b8a96f60cc203b6176..ff9272999d1b21a23895fb45dc616bff4654b405 100644
 #include "util/ralloc.h"
 #include "util/hash_table.h"
 #include "util/list.h"
+#include "util/u_math.h"
 
 #include "brw_context.h"
 #include "brw_defines.h"
 #include "brw_performance_query.h"
-#include "brw_oa_hsw.h"
-#include "brw_oa_bdw.h"
-#include "brw_oa_chv.h"
-#include "brw_oa_sklgt2.h"
-#include "brw_oa_sklgt3.h"
-#include "brw_oa_sklgt4.h"
-#include "brw_oa_bxt.h"
+#include "brw_oa_metrics.h"
 #include "intel_batchbuffer.h"
 
 #define FILE_DEBUG_FLAG DEBUG_PERFMON
 
-/*
- * The largest OA formats we can use include:
- * For Haswell:
- *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
- * For Gen8+
- *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
- */
-#define MAX_OA_REPORT_COUNTERS 62
-
 #define OAREPORT_REASON_MASK           0x3f
 #define OAREPORT_REASON_SHIFT          19
 #define OAREPORT_REASON_TIMER          (1<<0)
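 
 /* A sketch of decoding the report reason from the RPT_ID dword of a
  * Gen8+ OA report using the defines above ('report' is assumed to point
  * at an OA report):
  *
  *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
  *                      OAREPORT_REASON_MASK;
  *    bool is_timer = reason & OAREPORT_REASON_TIMER;
  */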
@@ -219,86 +205,61 @@ struct brw_oa_sample_buf {
    int refcount;
    int len;
    uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
+   uint32_t last_timestamp;
 };
 
-/**
- * i965 representation of a performance query object.
- *
- * NB: We want to keep this structure relatively lean considering that
- * applications may expect to allocate enough objects to be able to
- * query around all draw calls in a frame.
- */
-struct brw_perf_query_object
+/** Downcasting convenience macro. */
+static inline struct brw_perf_query_object *
+brw_perf_query(struct gl_perf_query_object *o)
 {
-   struct gl_perf_query_object base;
-
-   const struct brw_perf_query_info *query;
-
-   /* See query->kind to know which state below is in use... */
-   union {
-      struct {
+   return (struct brw_perf_query_object *) o;
+}
 
-         /**
-          * BO containing OA counter snapshots at query Begin/End time.
-          */
-         struct brw_bo *bo;
-
-         /**
-          * The MI_REPORT_PERF_COUNT command lets us specify a unique
-          * ID that will be reflected in the resulting OA report
-          * that's written by the GPU. This is the ID we're expecting
-          * in the begin report and the the end report should be
-          * @begin_report_id + 1.
-          */
-         int begin_report_id;
+#define MI_RPC_BO_SIZE              4096
+#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
+#define MI_FREQ_START_OFFSET_BYTES  (3072)
+#define MI_FREQ_END_OFFSET_BYTES    (3076)
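+
+/* Resulting MI_RPC BO layout, derived from the offsets above:
+ *   0x000 (0):    begin OA report
+ *   0x800 (2048): end OA report
+ *   0xc00 (3072): begin frequency register snapshot (4 bytes)
+ *   0xc04 (3076): end frequency register snapshot (4 bytes)
+ */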
 
-         /**
-          * Reference the head of the brw->perfquery.sample_buffers
-          * list at the time that the query started (so we only need
-          * to look at nodes after this point when looking for samples
-          * related to this query)
-          *
-          * (See struct brw_oa_sample_buf description for more details)
-          */
-         struct exec_node *samples_head;
+/******************************************************************************/
 
-         /**
-          * Storage for the final accumulated OA counters.
-          */
-         uint64_t accumulator[MAX_OA_REPORT_COUNTERS];
+static bool
+read_file_uint64(const char *file, uint64_t *val)
+{
+    char buf[32];
+    int fd, n;
 
-         /**
-          * false while in the unaccumulated_elements list, and set to
-          * true when the final, end MI_RPC snapshot has been
-          * accumulated.
-          */
-         bool results_accumulated;
+    fd = open(file, 0);
+    if (fd < 0)
+       return false;
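+    /* Retry reads interrupted by a signal. */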
+    while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
+           errno == EINTR);
+    close(fd);
+    if (n < 0)
+       return false;
 
-      } oa;
+    buf[n] = '\0';
+    *val = strtoull(buf, NULL, 0);
 
-      struct {
-         /**
-          * BO containing starting and ending snapshots for the
-          * statistics counters.
-          */
-         struct brw_bo *bo;
-      } pipeline_stats;
-   };
-};
+    return true;
+}
 
-/** Downcasting convenience macro. */
-static inline struct brw_perf_query_object *
-brw_perf_query(struct gl_perf_query_object *o)
+static bool
+read_sysfs_drm_device_file_uint64(struct brw_context *brw,
+                                  const char *file,
+                                  uint64_t *value)
 {
-   return (struct brw_perf_query_object *) o;
-}
+   char buf[512];
+   int len;
 
-#define STATS_BO_SIZE               4096
-#define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
-#define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)
+   len = snprintf(buf, sizeof(buf), "%s/%s",
+                  brw->perfquery.sysfs_dev_dir, file);
+   if (len < 0 || len >= sizeof(buf)) {
+      DBG("Failed to concatenate sys filename to read u64 from\n");
+      return false;
+   }
 
-#define MI_RPC_BO_SIZE              4096
-#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
+   return read_file_uint64(buf, value);
+}
 
 /******************************************************************************/
 
@@ -306,6 +267,44 @@ static bool
 brw_is_perf_query_ready(struct gl_context *ctx,
                         struct gl_perf_query_object *o);
 
+static uint64_t
+brw_perf_query_get_metric_id(struct brw_context *brw,
+                             const struct brw_perf_query_info *query)
+{
+   /* These queries are known never to change; their config ID was loaded
+    * when the query was first created. No need to look them up again.
+    */
+   if (query->kind == OA_COUNTERS)
+      return query->oa_metrics_set_id;
+
+   assert(query->kind == OA_COUNTERS_RAW);
+
+   /* Raw queries can be reprogrammed by an external application/library.
+    * When a raw query is used for the first time, its ID is set to a
+    * value != 0. When it stops being used, the ID returns to 0. No need
+    * to reload the ID when it's already loaded.
+    */
+   if (query->oa_metrics_set_id != 0) {
+      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
+          query->name, query->guid, query->oa_metrics_set_id);
+      return query->oa_metrics_set_id;
+   }
+
+   char metric_id_file[280];
+   snprintf(metric_id_file, sizeof(metric_id_file),
+            "%s/metrics/%s/id", brw->perfquery.sysfs_dev_dir, query->guid);
+
+   struct brw_perf_query_info *raw_query = (struct brw_perf_query_info *)query;
+   if (!read_file_uint64(metric_id_file, &raw_query->oa_metrics_set_id)) {
+      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
+      raw_query->oa_metrics_set_id = 1ULL;
+   } else {
+      DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
+          query->name, query->guid, query->oa_metrics_set_id);
+   }
+   return query->oa_metrics_set_id;
+}
+
 static void
 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
 {
@@ -315,6 +314,7 @@ dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
       DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
           id,
           o->Used ? "Dirty," : "New,",
@@ -330,6 +330,9 @@ dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
           o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
           obj->pipeline_stats.bo ? "yes" : "no");
       break;
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 }
 
@@ -420,12 +423,17 @@ brw_get_perf_query_info(struct gl_context *ctx,
 
    switch (query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
       *n_active = brw->perfquery.n_active_oa_queries;
       break;
 
    case PIPELINE_STATS:
       *n_active = brw->perfquery.n_active_pipeline_stats_queries;
       break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 }
 
@@ -549,45 +557,6 @@ drop_from_unaccumulated_query_list(struct brw_context *brw,
    reap_old_sample_buffers(brw);
 }
 
-static uint64_t
-timebase_scale(struct brw_context *brw, uint32_t u32_time_delta)
-{
-   const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   uint64_t tmp = ((uint64_t)u32_time_delta) * 1000000000ull;
-
-   return tmp ? tmp / devinfo->timestamp_frequency : 0;
-}
-
-static void
-accumulate_uint32(const uint32_t *report0,
-                  const uint32_t *report1,
-                  uint64_t *accumulator)
-{
-   *accumulator += (uint32_t)(*report1 - *report0);
-}
-
-static void
-accumulate_uint40(int a_index,
-                  const uint32_t *report0,
-                  const uint32_t *report1,
-                  uint64_t *accumulator)
-{
-   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
-   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
-   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
-   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
-   uint64_t value0 = report0[a_index + 4] | high0;
-   uint64_t value1 = report1[a_index + 4] | high1;
-   uint64_t delta;
-
-   if (value0 > value1)
-      delta = (1ULL << 40) + value1 - value0;
-   else
-      delta = value1 - value0;
-
-   *accumulator += delta;
-}
-
 /**
  * Given pointers to starting and ending OA snapshots, add the deltas for each
  * counter to the results.
@@ -603,29 +572,33 @@ add_deltas(struct brw_context *brw,
    int idx = 0;
    int i;
 
+   obj->oa.reports_accumulated++;
+
    switch (query->oa_format) {
    case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
-      accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
-      accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
+      brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
+      brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
 
       /* 32x 40bit A counters... */
       for (i = 0; i < 32; i++)
-         accumulate_uint40(i, start, end, accumulator + idx++);
+         brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);
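+      /* brw_perf_query_accumulate_uint40() handles 40-bit counter
+       * wraparound; e.g. (hypothetical values) value0 = 0xffffffff00
+       * and value1 = 0x100 give delta = (1ULL << 40) + 0x100 -
+       * 0xffffffff00 = 0x200 rather than a huge bogus delta. */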
 
       /* 4x 32bit A counters... */
       for (i = 0; i < 4; i++)
-         accumulate_uint32(start + 36 + i, end + 36 + i, accumulator + idx++);
+         brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
+                                          accumulator + idx++);
 
       /* 8x 32bit B counters + 8x 32bit C counters... */
       for (i = 0; i < 16; i++)
-         accumulate_uint32(start + 48 + i, end + 48 + i, accumulator + idx++);
+         brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
+                                          accumulator + idx++);
 
       break;
    case I915_OA_FORMAT_A45_B8_C8:
-      accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
+      brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
 
       for (i = 0; i < 61; i++)
-         accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
+         brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
 
       break;
    default:
@@ -681,11 +654,26 @@ discard_all_queries(struct brw_context *brw)
    }
 }
 
-static bool
-read_oa_samples(struct brw_context *brw)
+enum OaReadStatus {
+   OA_READ_STATUS_ERROR,
+   OA_READ_STATUS_UNFINISHED,
+   OA_READ_STATUS_FINISHED,
+};
+
+static enum OaReadStatus
+read_oa_samples_until(struct brw_context *brw,
+                      uint32_t start_timestamp,
+                      uint32_t end_timestamp)
 {
+   struct exec_node *tail_node =
+      exec_list_get_tail(&brw->perfquery.sample_buffers);
+   struct brw_oa_sample_buf *tail_buf =
+      exec_node_data(struct brw_oa_sample_buf, tail_node, link);
+   uint32_t last_timestamp = tail_buf->last_timestamp;
+
    while (1) {
       struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
+      uint32_t offset;
       int len;
 
       while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
@@ -697,28 +685,94 @@ read_oa_samples(struct brw_context *brw)
 
          if (len < 0) {
             if (errno == EAGAIN)
-               return true;
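+               /* The unsigned subtractions keep this comparison correct
+                * even if the 32-bit timestamps wrap around. */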
+               return ((last_timestamp - start_timestamp) >=
+                       (end_timestamp - start_timestamp)) ?
+                      OA_READ_STATUS_FINISHED :
+                      OA_READ_STATUS_UNFINISHED;
             else {
                DBG("Error reading i915 perf samples: %m\n");
-               return false;
             }
-         } else {
+         } else
             DBG("Spurious EOF reading i915 perf samples\n");
-            return false;
-         }
+
+         return OA_READ_STATUS_ERROR;
       }
 
       buf->len = len;
       exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
+
+      /* Go through the reports and update the last timestamp. */
+      offset = 0;
+      while (offset < buf->len) {
+         const struct drm_i915_perf_record_header *header =
+            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
+         uint32_t *report = (uint32_t *) (header + 1);
+
+         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
+            last_timestamp = report[1];
+
+         offset += header->size;
+      }
+
+      buf->last_timestamp = last_timestamp;
    }
 
    unreachable("not reached");
+   return OA_READ_STATUS_ERROR;
+}
+
+/**
+ * Try to read all the reports until either the delimiting timestamp
+ * or an error arises.
+ */
+static bool
+read_oa_samples_for_query(struct brw_context *brw,
+                          struct brw_perf_query_object *obj)
+{
+   uint32_t *start;
+   uint32_t *last;
+   uint32_t *end;
+
+   /* We need the MI_REPORT_PERF_COUNT to land before we can start
+    * accumulating. */
+   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
+          !brw_bo_busy(obj->oa.bo));
+
+   /* Map the BO once here; it stays mapped until the results have been
+    * accumulated and the BO is unmapped in brw_get_perf_query_data(). */
+   if (obj->oa.map == NULL)
+      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
+
+   start = last = obj->oa.map;
+   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
+
+   if (start[0] != obj->oa.begin_report_id) {
+      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
+      return true;
+   }
+   if (end[0] != (obj->oa.begin_report_id + 1)) {
+      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
+      return true;
+   }
+
+   /* Read the reports until the end timestamp. */
+   switch (read_oa_samples_until(brw, start[1], end[1])) {
+   case OA_READ_STATUS_ERROR:
+      /* Fallthrough and let accumulate_oa_reports() deal with the
+       * error. */
+   case OA_READ_STATUS_FINISHED:
+      return true;
+   case OA_READ_STATUS_UNFINISHED:
+      return false;
+   }
+
+   unreachable("invalid read status");
    return false;
 }
 
 /**
- * Accumulate raw OA counter values based on deltas between pairs
- * of OA reports.
+ * Accumulate raw OA counter values based on deltas between pairs of
+ * OA reports.
  *
  * Accumulation starts from the first report captured via
  * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
@@ -738,25 +792,20 @@ static void
 accumulate_oa_reports(struct brw_context *brw,
                       struct brw_perf_query_object *obj)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_perf_query_object *o = &obj->base;
-   uint32_t *query_buffer;
    uint32_t *start;
    uint32_t *last;
    uint32_t *end;
    struct exec_node *first_samples_node;
    bool in_ctx = true;
-   uint32_t ctx_id;
+   int out_duration = 0;
 
    assert(o->Ready);
+   assert(obj->oa.map != NULL);
 
-   /* Collect the latest periodic OA reports from i915 perf */
-   if (!read_oa_samples(brw))
-      goto error;
-
-   query_buffer = brw_bo_map(brw, obj->oa.bo, MAP_READ);
-
-   start = last = query_buffer;
-   end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t));
+   start = last = obj->oa.map;
+   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
 
    if (start[0] != obj->oa.begin_report_id) {
       DBG("Spurious start report id=%"PRIu32"\n", start[0]);
@@ -767,7 +816,7 @@ accumulate_oa_reports(struct brw_context *brw,
       goto error;
    }
 
-   ctx_id = start[2];
+   obj->oa.hw_id = start[2];
 
    /* See if we have any periodic reports to accumulate too... */
 
@@ -803,13 +852,13 @@ accumulate_oa_reports(struct brw_context *brw,
             /* Ignore reports that come before the start marker.
              * (Note: takes care to allow overflow of 32bit timestamps)
              */
-            if (timebase_scale(brw, report[1] - start[1]) > 5000000000)
+            if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
                continue;
 
             /* Ignore reports that come after the end marker.
              * (Note: takes care to allow overflow of 32bit timestamps)
              */
-            if (timebase_scale(brw, report[1] - end[1]) <= 5000000000)
+            if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
                goto end;
 
             /* For Gen8+ since the counters continue while other
              * contexts are running we need to discount any unrelated
              * deltas. The hardware automatically generates a report
              * on context switch which gives us a new reference point
              * to continue adding deltas from.
              *
              * For Haswell we can rely on the HW to stop the progress
              * of OA counters while any other context is active.
              */
-            if (brw->gen >= 8) {
-               if (in_ctx && report[2] != ctx_id) {
+            if (devinfo->gen >= 8) {
+               if (in_ctx && report[2] != obj->oa.hw_id) {
                   DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                   in_ctx = false;
-               } else if (in_ctx == false && report[2] == ctx_id) {
+                  out_duration = 0;
+               } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
                   DBG("i915 perf: Switch TO\n");
                   in_ctx = true;
-                  add = false;
+
+                  /* From experimentation in IGT, we found that the OA unit
+                   * might label some report as "idle" (using an invalid
+                   * context ID), right after a report for a given context.
+                   * Deltas generated by those reports actually belong to the
+                   * previous context, even though they're not labelled as
+                   * such.
+                   *
+                   * We didn't *really* Switch AWAY in the case that we e.g.
+                   * saw a single periodic report while idle...
+                   */
+                  if (out_duration >= 1)
+                     add = false;
                } else if (in_ctx) {
-                  assert(report[2] == ctx_id);
+                  assert(report[2] == obj->oa.hw_id);
                   DBG("i915 perf: Continuation IN\n");
                } else {
-                  assert(report[2] != ctx_id);
+                  assert(report[2] != obj->oa.hw_id);
                   DBG("i915 perf: Continuation OUT\n");
                   add = false;
+                  out_duration++;
                }
             }
 
@@ -863,7 +926,6 @@ end:
 
    DBG("Marking %d accumulated - results gathered\n", o->Id);
 
-   brw_bo_unmap(obj->oa.bo);
    obj->oa.results_accumulated = true;
    drop_from_unaccumulated_query_list(brw, obj);
    dec_n_oa_users(brw);
@@ -872,7 +934,6 @@ end:
 
 error:
 
-   brw_bo_unmap(obj->oa.bo);
    discard_all_queries(brw);
 }
 
@@ -920,12 +981,33 @@ open_i915_perf_oa_stream(struct brw_context *brw,
 }
 
 static void
-close_perf(struct brw_context *brw)
+close_perf(struct brw_context *brw,
+           const struct brw_perf_query_info *query)
 {
    if (brw->perfquery.oa_stream_fd != -1) {
       close(brw->perfquery.oa_stream_fd);
       brw->perfquery.oa_stream_fd = -1;
    }
+   if (query->kind == OA_COUNTERS_RAW) {
+      struct brw_perf_query_info *raw_query =
+         (struct brw_perf_query_info *) query;
+      raw_query->oa_metrics_set_id = 0;
+   }
+}
+
+static void
+capture_frequency_stat_register(struct brw_context *brw,
+                                struct brw_bo *bo,
+                                uint32_t bo_offset)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
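+   /* Snapshot the RPSTAT register holding the current GT frequency.
+    * Baytrail/Cherryview expose the frequency through a different
+    * mechanism and are skipped here. */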
+   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
+       !devinfo->is_baytrail && !devinfo->is_cherryview) {
+      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
+   } else if (devinfo->gen >= 9) {
+      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
+   }
 }
 
 /**
@@ -998,6 +1080,7 @@ brw_begin_perf_query(struct gl_context *ctx,
 
    switch (query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW: {
 
       /* Opening an i915 perf stream implies exclusive access to the OA unit
        * which will generate counter reports for a specific counter set with a
@@ -1005,14 +1088,17 @@ brw_begin_perf_query(struct gl_context *ctx,
        * require a different counter set or format unless we get an opportunity
        * to close the stream and open a new one...
        */
+      uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);
+
       if (brw->perfquery.oa_stream_fd != -1 &&
-          brw->perfquery.current_oa_metrics_set_id !=
-          query->oa_metrics_set_id) {
+          brw->perfquery.current_oa_metrics_set_id != metric_id) {
 
-         if (brw->perfquery.n_oa_users != 0)
+         if (brw->perfquery.n_oa_users != 0) {
+            DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
+                o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
             return false;
-         else
-            close_perf(brw);
+         else
+            close_perf(brw, query);
       }
 
       /* If the OA counters aren't already on, enable them. */
@@ -1074,17 +1160,15 @@ brw_begin_perf_query(struct gl_context *ctx,
              prev_sample_period / 1000000ul);
 
          if (!open_i915_perf_oa_stream(brw,
-                                       query->oa_metrics_set_id,
+                                       metric_id,
                                        query->oa_format,
                                        period_exponent,
                                        screen->fd, /* drm fd */
                                        brw->hw_ctx))
             return false;
       } else {
-         assert(brw->perfquery.current_oa_metrics_set_id ==
-                query->oa_metrics_set_id &&
-                brw->perfquery.current_oa_format ==
-                query->oa_format);
+         assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
+                brw->perfquery.current_oa_format == query->oa_format);
       }
 
       if (!inc_n_oa_users(brw)) {
@@ -1098,8 +1182,8 @@ brw_begin_perf_query(struct gl_context *ctx,
       }
 
       obj->oa.bo =
-         brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
-                      MI_RPC_BO_SIZE, 64);
+         brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE,
+                      BRW_MEMZONE_OTHER);
 #ifdef DEBUG
       /* Pre-filling the BO helps debug whether writes landed. */
       void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
@@ -1110,9 +1194,19 @@ brw_begin_perf_query(struct gl_context *ctx,
       obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
       brw->perfquery.next_query_start_report_id += 2;
 
+      /* We flush the batchbuffer here to minimize the chances that MI_RPC
+       * delimiting commands end up in different batchbuffers. If that's the
+       * case, the measurement will include the time it takes for the kernel
+       * scheduler to load a new request into the hardware. This is manifested in
+       * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
+       */
+      intel_batchbuffer_flush(brw);
+
       /* Take a starting OA counter snapshot. */
       brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                           obj->oa.begin_report_id);
+      capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);
+
       ++brw->perfquery.n_active_oa_queries;
 
       /* No already-buffered samples can possibly be associated with this query
@@ -1132,11 +1226,13 @@ brw_begin_perf_query(struct gl_context *ctx,
        */
       buf->refcount++;
 
+      obj->oa.hw_id = 0xffffffff;
       memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
       obj->oa.results_accumulated = false;
 
       add_to_unaccumulated_query_list(brw, obj);
       break;
+   }
 
    case PIPELINE_STATS:
       if (obj->pipeline_stats.bo) {
@@ -1146,13 +1242,17 @@ brw_begin_perf_query(struct gl_context *ctx,
 
       obj->pipeline_stats.bo =
          brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
-                            STATS_BO_SIZE, 64);
+                      STATS_BO_SIZE, BRW_MEMZONE_OTHER);
 
       /* Take starting snapshots. */
       snapshot_statistics_registers(brw, obj, 0);
 
       ++brw->perfquery.n_active_pipeline_stats_queries;
       break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 
    if (INTEL_DEBUG & DEBUG_PERFMON)
@@ -1183,6 +1283,7 @@ brw_end_perf_query(struct gl_context *ctx,
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
 
       /* NB: It's possible that the query will have already been marked
        * as 'accumulated' if an error was seen while reading samples
@@ -1191,19 +1292,12 @@ brw_end_perf_query(struct gl_context *ctx,
        */
       if (!obj->oa.results_accumulated) {
          /* Take an ending OA counter snapshot. */
+         capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
          brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                              MI_RPC_BO_END_OFFSET_BYTES,
                                              obj->oa.begin_report_id + 1);
       }
 
-      /* We flush the batchbuffer here to minimize the chances that MI_RPC
-       * delimiting commands end up in different batchbuffers. If that's the
-       * case, the measurement will include the time it takes for the kernel
-       * scheduler to load a new request into the hardware. This is manifested
-       * in tools like frameretrace by spikes in the "GPU Core Clocks"
-       * counter.
-       */
-      intel_batchbuffer_flush(brw);
       --brw->perfquery.n_active_oa_queries;
 
       /* NB: even though the query has now ended, it can't be accumulated
@@ -1217,6 +1311,10 @@ brw_end_perf_query(struct gl_context *ctx,
                                     STATS_BO_END_OFFSET_BYTES);
       --brw->perfquery.n_active_pipeline_stats_queries;
       break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 }
 
@@ -1231,12 +1329,17 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
       bo = obj->oa.bo;
       break;
 
    case PIPELINE_STATS:
       bo = obj->pipeline_stats.bo;
       break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 
    if (bo == NULL)
@@ -1248,7 +1351,18 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
    if (brw_batch_references(&brw->batch, bo))
       intel_batchbuffer_flush(brw);
 
-   brw_bo_wait_rendering(brw, bo);
+   brw_bo_wait_rendering(bo);
+
+   /* Due to a race condition between the OA unit signaling report
+    * availability and the report actually being written into memory,
+    * we need to wait for all the reports to come in before we can
+    * read them.
+    */
+   if (obj->query->kind == OA_COUNTERS ||
+       obj->query->kind == OA_COUNTERS_RAW) {
+      while (!read_oa_samples_for_query(brw, obj))
+         ;
+   }
 }
 
 static bool
@@ -1263,21 +1377,112 @@ brw_is_perf_query_ready(struct gl_context *ctx,
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
       return (obj->oa.results_accumulated ||
               (obj->oa.bo &&
                !brw_batch_references(&brw->batch, obj->oa.bo) &&
-               !brw_bo_busy(obj->oa.bo)));
-
+               !brw_bo_busy(obj->oa.bo) &&
+               read_oa_samples_for_query(brw, obj)));
    case PIPELINE_STATS:
       return (obj->pipeline_stats.bo &&
               !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
               !brw_bo_busy(obj->pipeline_stats.bo));
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 
-   unreachable("missing ready check for unknown query kind");
    return false;
 }
 
+static void
+gen8_read_report_clock_ratios(const uint32_t *report,
+                              uint64_t *slice_freq_hz,
+                              uint64_t *unslice_freq_hz)
+{
+   /* The lower 16bits of the RPT_ID field of the OA reports contains a
+    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
+    * divided this way :
+    *
+    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
+    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
+    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
+    *
+    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
+    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
+    *
+    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
+    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
+    */
+
+   uint32_t unslice_freq = report[0] & 0x1ff;
+   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
+   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
+   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
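+
+   /* Worked example with a hypothetical report: report[0] = 0x4800001b
+    * gives unslice_freq = 27 (~450 MHz) and slice_freq = 36 (~600 MHz). */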
+
+   *slice_freq_hz = slice_freq * 16666667ULL;
+   *unslice_freq_hz = unslice_freq * 16666667ULL;
+}
+
+static void
+read_slice_unslice_frequencies(struct brw_context *brw,
+                               struct brw_perf_query_object *obj)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   uint32_t *begin_report, *end_report;
+
+   /* Slice/Unslice frequency is only available in the OA reports when the
+    * "Disable OA reports due to clock ratio change" field in
+    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
+    * global register (see drivers/gpu/drm/i915/i915_perf.c).
+    *
+    * Documentation says this should be available on Gen9+ but experimentation
+    * shows that Gen8 reports similar values, so we enable it there too.
+    */
+   if (devinfo->gen < 8)
+      return;
+
+   begin_report = obj->oa.map;
+   end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
+
+   gen8_read_report_clock_ratios(begin_report,
+                                 &obj->oa.slice_frequency[0],
+                                 &obj->oa.unslice_frequency[0]);
+   gen8_read_report_clock_ratios(end_report,
+                                 &obj->oa.slice_frequency[1],
+                                 &obj->oa.unslice_frequency[1]);
+}
+
+static void
+read_gt_frequency(struct brw_context *brw,
+                  struct brw_perf_query_object *obj)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
+      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
+
+   switch (devinfo->gen) {
+   case 7:
+   case 8:
+      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
+      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
+      break;
+   case 9:
+   case 10:
+   case 11:
+      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
+      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
+      break;
+   default:
+      unreachable("unexpected gen");
+   }
+
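+   /* E.g. with hypothetical snapshots: a Gen8 field value of 24 yields
+    * 24 * 50 = 1200 MHz; a Gen9 value of 66 yields 66 * 50 / 3 = 1100 MHz. */
+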
+   /* Put the numbers into Hz. */
+   obj->oa.gt_frequency[0] *= 1000000ULL;
+   obj->oa.gt_frequency[1] *= 1000000ULL;
+}
+
 static int
 get_oa_counter_data(struct brw_context *brw,
                     struct brw_perf_query_object *obj,
@@ -1288,11 +1493,6 @@ get_oa_counter_data(struct brw_context *brw,
    int n_counters = query->n_counters;
    int written = 0;
 
-   if (!obj->oa.results_accumulated) {
-      accumulate_oa_reports(brw, obj);
-      assert(obj->oa.results_accumulated);
-   }
-
    for (int i = 0; i < n_counters; i++) {
       const struct brw_perf_query_counter *counter = &query->counters[i];
       uint64_t *out_uint64;
@@ -1382,12 +1582,29 @@ brw_get_perf_query_data(struct gl_context *ctx,
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
-      written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
+   case OA_COUNTERS_RAW:
+      if (!obj->oa.results_accumulated) {
+         read_gt_frequency(brw, obj);
+         read_slice_unslice_frequencies(brw, obj);
+         accumulate_oa_reports(brw, obj);
+         assert(obj->oa.results_accumulated);
+
+         brw_bo_unmap(obj->oa.bo);
+         obj->oa.map = NULL;
+      }
+      if (obj->query->kind == OA_COUNTERS)
+         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
+      else
+         written = brw_perf_query_get_mdapi_oa_data(brw, obj, data_size, (uint8_t *)data);
       break;
 
    case PIPELINE_STATS:
       written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
       break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
    }
 
    if (bytes_written)
@@ -1434,6 +1651,7 @@ brw_delete_perf_query(struct gl_context *ctx,
 
    switch (obj->query->kind) {
    case OA_COUNTERS:
+   case OA_COUNTERS_RAW:
       if (obj->oa.bo) {
          if (!obj->oa.results_accumulated) {
             drop_from_unaccumulated_query_list(brw, obj);
@@ -1453,9 +1671,11 @@ brw_delete_perf_query(struct gl_context *ctx,
          obj->pipeline_stats.bo = NULL;
       }
       break;
-   }
 
-   free(obj);
+   default:
+      unreachable("Unknown query type");
+      break;
+   }
 
    /* As an indication that the INTEL_performance_query extension is no
     * longer in use, it's a good time to free our cache of sample
@@ -1463,60 +1683,19 @@ brw_delete_perf_query(struct gl_context *ctx,
     */
    if (--brw->perfquery.n_query_instances == 0) {
       free_sample_bufs(brw);
-      close_perf(brw);
+      close_perf(brw, obj->query);
    }
-}
-
-/******************************************************************************/
-
-static struct brw_perf_query_info *
-append_query_info(struct brw_context *brw)
-{
-   brw->perfquery.queries =
-      reralloc(brw, brw->perfquery.queries,
-               struct brw_perf_query_info, ++brw->perfquery.n_queries);
 
-   return &brw->perfquery.queries[brw->perfquery.n_queries - 1];
-}
-
-static void
-add_stat_reg(struct brw_perf_query_info *query,
-             uint32_t reg,
-             uint32_t numerator,
-             uint32_t denominator,
-             const char *name,
-             const char *description)
-{
-   struct brw_perf_query_counter *counter;
-
-   assert(query->n_counters < MAX_STAT_COUNTERS);
-
-   counter = &query->counters[query->n_counters];
-   counter->name = name;
-   counter->desc = description;
-   counter->type = GL_PERFQUERY_COUNTER_RAW_INTEL;
-   counter->data_type = GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
-   counter->size = sizeof(uint64_t);
-   counter->offset = sizeof(uint64_t) * query->n_counters;
-   counter->pipeline_stat.reg = reg;
-   counter->pipeline_stat.numerator = numerator;
-   counter->pipeline_stat.denominator = denominator;
-
-   query->n_counters++;
+   free(obj);
 }
 
-static void
-add_basic_stat_reg(struct brw_perf_query_info *query,
-                   uint32_t reg, const char *name)
-{
-   add_stat_reg(query, reg, 1, 1, name, name);
-}
+/******************************************************************************/
 
 static void
 init_pipeline_statistic_query_registers(struct brw_context *brw)
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   struct brw_perf_query_info *query = append_query_info(brw);
+   struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);
 
    query->kind = PIPELINE_STATS;
    query->name = "Pipeline Statistics Registers";
@@ -1524,108 +1703,102 @@ init_pipeline_statistic_query_registers(struct brw_context *brw)
    query->counters =
       rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
 
-   add_basic_stat_reg(query, IA_VERTICES_COUNT,
-                      "N vertices submitted");
-   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
-                      "N primitives submitted");
-   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
-                      "N vertex shader invocations");
+   brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
+                                          "N vertices submitted");
+   brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
+                                          "N primitives submitted");
+   brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
+                                          "N vertex shader invocations");
 
    if (devinfo->gen == 6) {
-      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
-                   "SO_PRIM_STORAGE_NEEDED",
-                   "N geometry shader stream-out primitives (total)");
-      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
-                   "SO_NUM_PRIMS_WRITTEN",
-                   "N geometry shader stream-out primitives (written)");
+      brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
+                                       "SO_PRIM_STORAGE_NEEDED",
+                                       "N geometry shader stream-out primitives (total)");
+      brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
+                                       "SO_NUM_PRIMS_WRITTEN",
+                                       "N geometry shader stream-out primitives (written)");
    } else {
-      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
-                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
-                   "N stream-out (stream 0) primitives (total)");
-      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
-                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
-                   "N stream-out (stream 1) primitives (total)");
-      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
-                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
-                   "N stream-out (stream 2) primitives (total)");
-      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
-                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
-                   "N stream-out (stream 3) primitives (total)");
-      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
-                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
-                   "N stream-out (stream 0) primitives (written)");
-      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
-                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
-                   "N stream-out (stream 1) primitives (written)");
-      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
-                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
-                   "N stream-out (stream 2) primitives (written)");
-      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
-                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
-                   "N stream-out (stream 3) primitives (written)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
+                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
+                                       "N stream-out (stream 0) primitives (total)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
+                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
+                                       "N stream-out (stream 1) primitives (total)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
+                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
+                                       "N stream-out (stream 2) primitives (total)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
+                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
+                                       "N stream-out (stream 3) primitives (total)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
+                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
+                                       "N stream-out (stream 0) primitives (written)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
+                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
+                                       "N stream-out (stream 1) primitives (written)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
+                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
+                                       "N stream-out (stream 2) primitives (written)");
+      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
+                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
+                                       "N stream-out (stream 3) primitives (written)");
    }
 
-   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
-                      "N TCS shader invocations");
-   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
-                      "N TES shader invocations");
+   brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
+                                          "N TCS shader invocations");
+   brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
+                                          "N TES shader invocations");
 
-   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
-                      "N geometry shader invocations");
-   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
-                      "N geometry shader primitives emitted");
+   brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
+                                          "N geometry shader invocations");
+   brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
+                                          "N geometry shader primitives emitted");
 
-   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
-                      "N primitives entering clipping");
-   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
-                      "N primitives leaving clipping");
+   brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
+                                          "N primitives entering clipping");
+   brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
+                                          "N primitives leaving clipping");
 
    if (devinfo->is_haswell || devinfo->gen == 8)
-      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
-                   "N fragment shader invocations",
-                   "N fragment shader invocations");
+      brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
+                                       "N fragment shader invocations",
+                                       "N fragment shader invocations");
    else
-      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
-                         "N fragment shader invocations");
+      brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
+                                             "N fragment shader invocations");
 
-   add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
+   brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
 
    if (devinfo->gen >= 7)
-      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
-                         "N compute shader invocations");
+      brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
+                                             "N compute shader invocations");
 
    query->data_size = sizeof(uint64_t) * query->n_counters;
 }
 
-static bool
-read_file_uint64(const char *file, uint64_t *val)
+static void
+register_oa_config(struct brw_context *brw,
+                   const struct brw_perf_query_info *query,
+                   uint64_t config_id)
 {
-    char buf[32];
-    int fd, n;
-
-    fd = open(file, 0);
-    if (fd < 0)
-       return false;
-    n = read(fd, buf, sizeof (buf) - 1);
-    close(fd);
-    if (n < 0)
-       return false;
-
-    buf[n] = '\0';
-    *val = strtoull(buf, NULL, 0);
+   struct brw_perf_query_info *registered_query =
+      brw_perf_query_append_query_info(brw);
 
-    return true;
+   *registered_query = *query;
+   registered_query->oa_metrics_set_id = config_id;
+   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
+       registered_query->oa_metrics_set_id, query->guid);
 }
 
 static void
-enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
+enumerate_sysfs_metrics(struct brw_context *brw)
 {
    char buf[256];
    DIR *metricsdir = NULL;
    struct dirent *metric_entry;
    int len;
 
-   len = snprintf(buf, sizeof(buf), "%s/metrics", sysfs_dev_dir);
+   len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
    if (len < 0 || len >= sizeof(buf)) {
       DBG("Failed to concatenate path to sysfs metrics/ directory\n");
       return;
@@ -1649,11 +1822,10 @@ enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
       entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
                                       metric_entry->d_name);
       if (entry) {
-         struct brw_perf_query_info *query;
          uint64_t id;
 
          len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
-                        sysfs_dev_dir, metric_entry->d_name);
+                        brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
          if (len < 0 || len >= sizeof(buf)) {
             DBG("Failed to concatenate path to sysfs metric id file\n");
             continue;
@@ -1664,12 +1836,7 @@ enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
             continue;
          }
 
-         query = append_query_info(brw);
-         *query = *(struct brw_perf_query_info *)entry->data;
-         query->oa_metrics_set_id = id;
-
-         DBG("metric set known by mesa: id = %" PRIu64"\n",
-             query->oa_metrics_set_id);
+         register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
       } else
          DBG("metric set not known by mesa (skipping)\n");
    }
@@ -1678,145 +1845,212 @@ enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
 }
 
 static bool
-read_sysfs_drm_device_file_uint64(struct brw_context *brw,
-                                  const char *sysfs_dev_dir,
-                                  const char *file,
-                                  uint64_t *value)
+kernel_has_dynamic_config_support(struct brw_context *brw)
 {
-   char buf[512];
-   int len;
+   __DRIscreen *screen = brw->screen->driScrnPriv;
+   struct hash_entry *entry;
 
-   len = snprintf(buf, sizeof(buf), "%s/%s", sysfs_dev_dir, file);
-   if (len < 0 || len >= sizeof(buf)) {
-      DBG("Failed to concatenate sys filename to read u64 from\n");
-      return false;
+   hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
+      struct brw_perf_query_info *query = entry->data;
+      char config_path[280];
+      uint64_t config_id;
+
+      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
+               brw->perfquery.sysfs_dev_dir, query->guid);
+
+      /* Look for the test config, which we know we can't replace. */
+      if (read_file_uint64(config_path, &config_id) && config_id == 1) {
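+         /* On kernels with dynamic config support the REMOVE_CONFIG
+          * ioctl exists but the immutable test config cannot be removed,
+          * so the call fails with ENOENT; kernels without the ioctl fail
+          * with a different errno. */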
+         return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
+                         &config_id) < 0 && errno == ENOENT;
+      }
    }
 
-   return read_file_uint64(buf, value);
+   return false;
+}
+
+static void
+init_oa_configs(struct brw_context *brw)
+{
+   __DRIscreen *screen = brw->screen->driScrnPriv;
+   struct hash_entry *entry;
+
+   hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
+      const struct brw_perf_query_info *query = entry->data;
+      struct drm_i915_perf_oa_config config;
+      char config_path[280];
+      uint64_t config_id;
+      int ret;
+
+      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
+               brw->perfquery.sysfs_dev_dir, query->guid);
+
+      /* Don't recreate already loaded configs. */
+      if (read_file_uint64(config_path, &config_id)) {
+         DBG("metric set: %s (already loaded)\n", query->guid);
+         register_oa_config(brw, query, config_id);
+         continue;
+      }
+
+      memset(&config, 0, sizeof(config));
+
+      memcpy(config.uuid, query->guid, sizeof(config.uuid));
+
+      config.n_mux_regs = query->n_mux_regs;
+      config.mux_regs_ptr = (uintptr_t) query->mux_regs;
+
+      config.n_boolean_regs = query->n_b_counter_regs;
+      config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
+
+      config.n_flex_regs = query->n_flex_regs;
+      config.flex_regs_ptr = (uintptr_t) query->flex_regs;
+
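+      /* On success, DRM_IOCTL_I915_PERF_ADD_CONFIG returns the ID of
+       * the newly created metric set. */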
+      ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
+      if (ret < 0) {
+         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
+             query->name, query->guid, strerror(errno));
+         continue;
+      }
+
+      register_oa_config(brw, query, ret);
+      DBG("metric set: %s (added)\n", query->guid);
+   }
 }
 
 static bool
-init_oa_sys_vars(struct brw_context *brw, const char *sysfs_dev_dir)
+query_topology(struct brw_context *brw)
 {
-   const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
+   __DRIscreen *screen = brw->screen->driScrnPriv;
+   struct drm_i915_query_item item = {
+      .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
+   };
+   struct drm_i915_query query = {
+      .num_items = 1,
+      .items_ptr = (uintptr_t) &item,
+   };
 
-   if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
-                                          "gt_min_freq_mhz",
-                                          &min_freq_mhz))
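+   /* First call with data_ptr unset: the kernel only fills in
+    * item.length with the size of the buffer to allocate. */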
+   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
       return false;
 
-   if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
-                                          "gt_max_freq_mhz",
-                                          &max_freq_mhz))
+   struct drm_i915_query_topology_info *topo_info =
+      (struct drm_i915_query_topology_info *) calloc(1, item.length);
+   item.data_ptr = (uintptr_t) topo_info;
+
+   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
+       item.length <= 0) {
+      free(topo_info);
       return false;
+   }
 
-   brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
-   brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
-   brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
+   gen_device_info_update_from_topology(&brw->screen->devinfo,
+                                        topo_info);
 
-   if (devinfo->is_haswell) {
-      if (devinfo->gt == 1) {
-         brw->perfquery.sys_vars.n_eus = 10;
-         brw->perfquery.sys_vars.n_eu_slices = 1;
-         brw->perfquery.sys_vars.n_eu_sub_slices = 1;
-         brw->perfquery.sys_vars.slice_mask = 0x1;
-         brw->perfquery.sys_vars.subslice_mask = 0x1;
-      } else if (devinfo->gt == 2) {
-         brw->perfquery.sys_vars.n_eus = 20;
-         brw->perfquery.sys_vars.n_eu_slices = 1;
-         brw->perfquery.sys_vars.n_eu_sub_slices = 2;
-         brw->perfquery.sys_vars.slice_mask = 0x1;
-         brw->perfquery.sys_vars.subslice_mask = 0x3;
-      } else if (devinfo->gt == 3) {
-         brw->perfquery.sys_vars.n_eus = 40;
-         brw->perfquery.sys_vars.n_eu_slices = 2;
-         brw->perfquery.sys_vars.n_eu_sub_slices = 2;
-         brw->perfquery.sys_vars.slice_mask = 0x3;
-         brw->perfquery.sys_vars.subslice_mask = 0xf;
-      } else
-         unreachable("not reached");
-   } else {
-      __DRIscreen *screen = brw->screen->driScrnPriv;
-      drm_i915_getparam_t gp;
-      int ret;
-      int n_eus = 0;
-      int slice_mask = 0;
-      int ss_mask = 0;
-      int s_max = devinfo->num_slices; /* maximum number of slices */
-      int ss_max = 0; /* maximum number of subslices per slice */
-      uint64_t subslice_mask = 0;
-      int s;
-
-      if (devinfo->gen == 8) {
-         if (devinfo->gt == 1) {
-            ss_max = 2;
-         } else {
-            ss_max = 3;
-         }
-      } else if (devinfo->gen == 9) {
-         /* XXX: beware that the kernel (as of writing) actually works as if
-          * ss_max == 4 since the HW register that reports the global subslice
-          * mask has 4 bits while in practice the limit is 3. It's also
-          * important that we initialize $SubsliceMask with 3 bits per slice
-          * since that's what the counter availability expressions in XML
-          * expect.
-          */
-         ss_max = 3;
-      } else
-         return false;
+   free(topo_info);
 
-      gp.param = I915_PARAM_EU_TOTAL;
-      gp.value = &n_eus;
-      ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-      if (ret)
-         return false;
+   return true;
+}
 
-      gp.param = I915_PARAM_SLICE_MASK;
-      gp.value = &slice_mask;
-      ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-      if (ret)
-         return false;
+static bool
+getparam_topology(struct brw_context *brw)
+{
+   __DRIscreen *screen = brw->screen->driScrnPriv;
+   drm_i915_getparam_t gp;
+   int ret;
+
+   int slice_mask = 0;
+   gp.param = I915_PARAM_SLICE_MASK;
+   gp.value = &slice_mask;
+   ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+   if (ret)
+      return false;
 
-      gp.param = I915_PARAM_SUBSLICE_MASK;
-      gp.value = &ss_mask;
-      ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-      if (ret)
-         return false;
+   int subslice_mask = 0;
+   gp.param = I915_PARAM_SUBSLICE_MASK;
+   gp.value = &subslice_mask;
+   ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+   if (ret)
+      return false;
 
-      brw->perfquery.sys_vars.n_eus = n_eus;
-      brw->perfquery.sys_vars.n_eu_slices = __builtin_popcount(slice_mask);
-      brw->perfquery.sys_vars.slice_mask = slice_mask;
-
-      /* Note: the _SUBSLICE_MASK param only reports a global subslice mask
-       * which applies to all slices.
-       *
-       * Note: some of the metrics we have (as described in XML) are
-       * conditional on a $SubsliceMask variable which is expected to also
-       * reflect the slice mask by packing together subslice masks for each
-       * slice in one value..
-       */
-      for (s = 0; s < s_max; s++) {
-         if (slice_mask & (1<<s)) {
-            subslice_mask |= ss_mask << (ss_max * s);
-         }
-      }
+   gen_device_info_update_from_masks(&brw->screen->devinfo,
+                                     slice_mask,
+                                     subslice_mask,
+                                     brw->screen->eu_total);
 
-      brw->perfquery.sys_vars.subslice_mask = subslice_mask;
-      brw->perfquery.sys_vars.n_eu_sub_slices =
-         __builtin_popcount(subslice_mask);
+   return true;
+}
+
+static void
+compute_topology_builtins(struct brw_context *brw)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
+   brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
+
+   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
+      brw->perfquery.sys_vars.n_eu_sub_slices +=
+         util_bitcount(devinfo->subslice_masks[i]);
    }
 
+   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
+      brw->perfquery.sys_vars.n_eus += util_bitcount(devinfo->eu_masks[i]);
+
    brw->perfquery.sys_vars.eu_threads_count =
       brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
 
+   /* At the moment the subslice mask builtin has groups of 3 bits for
+    * each slice.
+    *
+    * Ideally equations would be updated to have a slice/subslice query
+    * function/operator.
+    */
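+   /* E.g. (hypothetical topology): slice 1 / subslice 2 available sets
+    * bit (1 * 3 + 2) = 5 of $SubsliceMask. */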
+   brw->perfquery.sys_vars.subslice_mask = 0;
+   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
+      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
+         if (gen_device_info_subslice_available(devinfo, s, ss))
+            brw->perfquery.sys_vars.subslice_mask |= 1UL << (s * 3 + ss);
+      }
+   }
+}
+
+static bool
+init_oa_sys_vars(struct brw_context *brw)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
+   __DRIscreen *screen = brw->screen->driScrnPriv;
+
+   if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
+      return false;
+
+   if (!read_sysfs_drm_device_file_uint64(brw,  "gt_max_freq_mhz", &max_freq_mhz))
+      return false;
+
+   if (!query_topology(brw)) {
+      /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
+      if (devinfo->gen >= 10)
+         return false;
+
+      if (!getparam_topology(brw)) {
+         /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
+         if (devinfo->gen >= 8)
+            return false;
+
+         /* On Haswell, the values are already computed for us in
+          * gen_device_info.
+          */
+      }
+   }
+
+   memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
+   brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
+   brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
+   brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
+   brw->perfquery.sys_vars.revision = intel_device_get_revision(screen->fd);
+   compute_topology_builtins(brw);
+
    return true;
 }
 
 static bool
-get_sysfs_dev_dir(struct brw_context *brw,
-                  char *path_buf,
-                  int path_buf_len)
+get_sysfs_dev_dir(struct brw_context *brw)
 {
    __DRIscreen *screen = brw->screen->driScrnPriv;
    struct stat sb;
@@ -1825,9 +2059,7 @@ get_sysfs_dev_dir(struct brw_context *brw,
    struct dirent *drm_entry;
    int len;
 
-   assert(path_buf);
-   assert(path_buf_len);
-   path_buf[0] = '\0';
+   brw->perfquery.sysfs_dev_dir[0] = '\0';
 
    if (fstat(screen->fd, &sb)) {
       DBG("Failed to stat DRM fd\n");
@@ -1842,16 +2074,17 @@ get_sysfs_dev_dir(struct brw_context *brw,
       return false;
    }
 
-   len = snprintf(path_buf, path_buf_len,
+   len = snprintf(brw->perfquery.sysfs_dev_dir,
+                  sizeof(brw->perfquery.sysfs_dev_dir),
                   "/sys/dev/char/%d:%d/device/drm", maj, min);
-   if (len < 0 || len >= path_buf_len) {
+   if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
       DBG("Failed to concatenate sysfs path to drm device\n");
       return false;
    }
 
-   drmdir = opendir(path_buf);
+   drmdir = opendir(brw->perfquery.sysfs_dev_dir);
    if (!drmdir) {
-      DBG("Failed to open %s: %m\n", path_buf);
+      DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
       return false;
    }
 
@@ -1860,11 +2093,12 @@ get_sysfs_dev_dir(struct brw_context *brw,
            drm_entry->d_type == DT_LNK) &&
           strncmp(drm_entry->d_name, "card", 4) == 0)
       {
-         len = snprintf(path_buf, path_buf_len,
+         len = snprintf(brw->perfquery.sysfs_dev_dir,
+                        sizeof(brw->perfquery.sysfs_dev_dir),
                         "/sys/dev/char/%d:%d/device/drm/%s",
                         maj, min, drm_entry->d_name);
          closedir(drmdir);
-         if (len < 0 || len >= path_buf_len)
+         if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
             return false;
          else
             return true;
@@ -1900,6 +2134,23 @@ get_register_queries_function(const struct gen_device_info *devinfo)
       if (devinfo->gt == 4)
          return brw_oa_register_queries_sklgt4;
    }
+   if (devinfo->is_kabylake) {
+      if (devinfo->gt == 2)
+         return brw_oa_register_queries_kblgt2;
+      if (devinfo->gt == 3)
+         return brw_oa_register_queries_kblgt3;
+   }
+   if (devinfo->is_geminilake)
+      return brw_oa_register_queries_glk;
+   if (devinfo->is_coffeelake) {
+      if (devinfo->gt == 2)
+         return brw_oa_register_queries_cflgt2;
+      if (devinfo->gt == 3)
+         return brw_oa_register_queries_cflgt3;
+   }
+   if (devinfo->is_cannonlake)
+      return brw_oa_register_queries_cnl;
+
    return NULL;
 }
 
@@ -1910,13 +2161,13 @@ brw_init_perf_query_info(struct gl_context *ctx)
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
    bool i915_perf_oa_available = false;
    struct stat sb;
-   char sysfs_dev_dir[128];
    perf_register_oa_queries_t oa_register;
 
    if (brw->perfquery.n_queries)
       return brw->perfquery.n_queries;
 
    init_pipeline_statistic_query_registers(brw);
+   brw_perf_query_register_mdapi_statistic_query(brw);
 
    oa_register = get_register_queries_function(devinfo);
 
@@ -1942,8 +2193,8 @@ brw_init_perf_query_info(struct gl_context *ctx)
 
    if (i915_perf_oa_available &&
        oa_register &&
-       get_sysfs_dev_dir(brw, sysfs_dev_dir, sizeof(sysfs_dev_dir)) &&
-       init_oa_sys_vars(brw, sysfs_dev_dir))
+       get_sysfs_dev_dir(brw) &&
+       init_oa_sys_vars(brw))
    {
       brw->perfquery.oa_metrics_table =
          _mesa_hash_table_create(NULL, _mesa_key_hash_string,
@@ -1954,7 +2205,13 @@ brw_init_perf_query_info(struct gl_context *ctx)
        */
       oa_register(brw);
 
-      enumerate_sysfs_metrics(brw, sysfs_dev_dir);
+      if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
+          kernel_has_dynamic_config_support(brw))
+         init_oa_configs(brw);
+      else
+         enumerate_sysfs_metrics(brw);
+
+      brw_perf_query_register_mdapi_oa_query(brw);
    }
 
    brw->perfquery.unaccumulated =