intel/perf: make oa_sample_buffers private
[mesa.git] / src / intel / perf / gen_perf.c
index fe5bbabe3c81e41ea87c06ed1a1586b5c763b11d..9b17b3503634372de089fbf15a5eb6d5536578a7 100644
 #define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
 #define MI_FREQ_END_OFFSET_BYTES    (3076)
 
+#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
+
+#define GEN7_RPSTAT1                       0xA01C
+#define  GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT   7
+#define  GEN7_RPSTAT1_CURR_GT_FREQ_MASK    INTEL_MASK(13, 7)
+#define  GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT   0
+#define  GEN7_RPSTAT1_PREV_GT_FREQ_MASK    INTEL_MASK(6, 0)
+
+#define GEN9_RPSTAT0                       0xA01C
+#define  GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT   23
+#define  GEN9_RPSTAT0_CURR_GT_FREQ_MASK    INTEL_MASK(31, 23)
+#define  GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT   0
+#define  GEN9_RPSTAT0_PREV_GT_FREQ_MASK    INTEL_MASK(8, 0)
+
+#define GEN6_SO_PRIM_STORAGE_NEEDED     0x2280
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n)  (0x5240 + (n) * 8)
+#define GEN6_SO_NUM_PRIMS_WRITTEN       0x2288
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n)    (0x5200 + (n) * 8)
+
 #define MAP_READ  (1 << 0)
 #define MAP_WRITE (1 << 1)
 
+/**
+ * Periodic OA samples are read() into these buffer structures via the
+ * i915 perf kernel interface and appended to the
+ * perf_ctx->sample_buffers linked list. When we process the
+ * results of an OA metrics query we need to consider all the periodic
+ * samples between the Begin and End MI_REPORT_PERF_COUNT command
+ * markers.
+ *
+ * 'Periodic' is a simplification: other automatic reports written by
+ * the hardware are also buffered here.
+ *
+ * Considering three queries, A, B and C:
+ *
+ *  Time ---->
+ *                ________________A_________________
+ *                |                                |
+ *                | ________B_________ _____C___________
+ *                | |                | |           |   |
+ *
+ * And an illustration of sample buffers read over this time frame:
+ * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
+ *
+ * These nodes may hold samples for query A:
+ * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
+ *
+ * These nodes may hold samples for query B:
+ * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
+ *
+ * These nodes may hold samples for query C:
+ * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
+ *
+ * The illustration assumes we have an even distribution of periodic
+ * samples, so all nodes cover the same span of time.
+ *
+ * Note: to simplify the code, the list is never empty.
+ *
+ * With overlapping queries we can see that periodic OA reports may
+ * relate to multiple queries, so care needs to be taken to keep
+ * track of sample buffers until there are no queries that might
+ * depend on their contents.
+ *
+ * We use a node ref counting system where a reference ensures that a
+ * node and all following nodes can't be freed/recycled until the
+ * reference drops to zero.
+ *
+ * E.g. with a ref of one here:
+ * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
+ *
+ * These nodes could be freed or recycled ("reaped"):
+ * [  0  ][  0  ]
+ *
+ * These must be preserved until the leading ref drops to zero:
+ *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
+ *
+ * When a query starts we take a reference on the current tail of
+ * the list, knowing that no already-buffered samples can possibly
+ * relate to the newly-started query. A pointer to this node is
+ * also saved in the query object's ->oa.samples_head.
+ *
+ * E.g. starting query A while there are two nodes in .sample_buffers:
+ *                ________________A________
+ *                |
+ *
+ * [  0  ][  1  ]
+ *           ^_______ Add a reference and store pointer to node in
+ *                    A->oa.samples_head
+ *
+ * Moving forward to when the B query starts with no new buffer nodes:
+ * (for reference, i915 perf reads() are only done when queries finish)
+ *                ________________A_______
+ *                | ________B___
+ *                | |
+ *
+ * [  0  ][  2  ]
+ *           ^_______ Add a reference and store pointer to
+ *                    node in B->oa.samples_head
+ *
+ * Once a query is finished, i.e. once the OA query has become 'Ready',
+ * the End OA report has landed and we have processed all the
+ * intermediate periodic samples, we drop the ->oa.samples_head
+ * reference we took at the start.
+ *
+ * So when the B query has finished we have:
+ *                ________________A________
+ *                | ______B___________
+ *                | |                |
+ * [  0  ][  1  ][  0  ][  0  ][  0  ]
+ *           ^_______ Drop B->oa.samples_head reference
+ *
+ * We still can't free these due to the A->oa.samples_head ref:
+ *        [  1  ][  0  ][  0  ][  0  ]
+ *
+ * When the A query finishes: (note there's a new ref for C's samples_head)
+ *                ________________A_________________
+ *                |                                |
+ *                |                    _____C_________
+ *                |                    |           |
+ * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
+ *           ^_______ Drop A->oa.samples_head reference
+ *
+ * And we can now reap these nodes up to the C->oa.samples_head:
+ * [  X  ][  X  ][  X  ][  X  ]
+ *                  keeping -> [  1  ][  0  ][  0  ]
+ *
+ * We reap old sample buffers each time we finish processing an OA
+ * query by iterating the sample_buffers list from the head until we
+ * find a referenced node and stop (sketched after the struct below).
+ *
+ * Reaped buffers move to the perf_ctx->free_sample_buffers list and
+ * when we come to read() we first look to recycle a buffer from the
+ * free_sample_buffers list before allocating a new buffer.
+ */
+struct oa_sample_buf {
+   struct exec_node link;
+   int refcount;
+   int len;
+   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
+   uint32_t last_timestamp;
+};
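[Editor's note: the reaping walk described above is short given this struct. A minimal sketch, illustrative only and not part of this change; it assumes the exec_list helpers used elsewhere in this file and the sample_buffers / free_sample_buffers lists on gen_perf_context:

   static void
   reap_old_sample_buffers_sketch(struct gen_perf_context *perf_ctx)
   {
      struct exec_node *tail_node =
         exec_list_get_tail(&perf_ctx->sample_buffers);
      struct oa_sample_buf *tail_buf =
         exec_node_data(struct oa_sample_buf, tail_node, link);

      foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                              &perf_ctx->sample_buffers)
      {
         /* Stop at the first node a query still references, and never
          * reap the tail node so the list stays non-empty.
          */
         if (buf->refcount != 0 || buf == tail_buf)
            return;

         exec_node_remove(&buf->link);
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
      }
   }

The helper this file actually uses, gen_perf_reap_old_sample_buffers(), is called from drop_from_unaccumulated_query_list() further down.]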
+
+struct gen_perf_query_object *
+gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
+{
+   const struct gen_perf_query_info *query =
+      &perf_ctx->perf->queries[query_index];
+   struct gen_perf_query_object *obj =
+      calloc(1, sizeof(struct gen_perf_query_object));
+
+   if (!obj)
+      return NULL;
+
+   obj->queryinfo = query;
+
+   perf_ctx->n_query_instances++;
+   return obj;
+}
+
 static bool
 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
 {
@@ -151,13 +307,32 @@ read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
    return read_file_uint64(buf, value);
 }
 
+static inline struct gen_perf_query_info *
+append_query_info(struct gen_perf_config *perf, int max_counters)
+{
+   struct gen_perf_query_info *query;
+
+   perf->queries = reralloc(perf, perf->queries,
+                            struct gen_perf_query_info,
+                            ++perf->n_queries);
+   query = &perf->queries[perf->n_queries - 1];
+   memset(query, 0, sizeof(*query));
+
+   if (max_counters > 0) {
+      query->max_counters = max_counters;
+      query->counters =
+         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
+   }
+
+   return query;
+}
+
 static void
 register_oa_config(struct gen_perf_config *perf,
                    const struct gen_perf_query_info *query,
                    uint64_t config_id)
 {
-   struct gen_perf_query_info *registred_query =
-      gen_perf_query_append_query_info(perf, 0);
+   struct gen_perf_query_info *registred_query = append_query_info(perf, 0);
 
    *registred_query = *query;
    registred_query->oa_metrics_set_id = config_id;
@@ -381,8 +556,123 @@ get_register_queries_function(const struct gen_device_info *devinfo)
    return NULL;
 }
 
-bool
-gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
+static inline void
+add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
+             uint32_t numerator, uint32_t denominator,
+             const char *name, const char *description)
+{
+   struct gen_perf_query_counter *counter;
+
+   assert(query->n_counters < query->max_counters);
+
+   counter = &query->counters[query->n_counters];
+   counter->name = name;
+   counter->desc = description;
+   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
+   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
+   counter->offset = sizeof(uint64_t) * query->n_counters;
+   counter->pipeline_stat.reg = reg;
+   counter->pipeline_stat.numerator = numerator;
+   counter->pipeline_stat.denominator = denominator;
+
+   query->n_counters++;
+}
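[Editor's note: each statistic lands in its own 8-byte slot at offset 8 * i, so a statistics query's result buffer is a flat, tightly packed uint64_t array; get_pipeline_stats_data() further down walks it with p += 8. Illustratively:

   /* Result buffer layout for an N-counter statistics query:
    *   bytes [ 0,  8)   counter 0 (uint64_t end - start delta)
    *   bytes [ 8, 16)   counter 1
    *   ...
    *   data_size == sizeof(uint64_t) * N
    */
]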
+
+static inline void
+add_basic_stat_reg(struct gen_perf_query_info *query,
+                   uint32_t reg, const char *name)
+{
+   add_stat_reg(query, reg, 1, 1, name, name);
+}
+
+static void
+load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
+                                const struct gen_device_info *devinfo)
+{
+   struct gen_perf_query_info *query =
+      append_query_info(perf_cfg, MAX_STAT_COUNTERS);
+
+   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
+   query->name = "Pipeline Statistics Registers";
+
+   add_basic_stat_reg(query, IA_VERTICES_COUNT,
+                      "N vertices submitted");
+   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
+                      "N primitives submitted");
+   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
+                      "N vertex shader invocations");
+
+   if (devinfo->gen == 6) {
+      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
+                   "SO_PRIM_STORAGE_NEEDED",
+                   "N geometry shader stream-out primitives (total)");
+      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
+                   "SO_NUM_PRIMS_WRITTEN",
+                   "N geometry shader stream-out primitives (written)");
+   } else {
+      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
+                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
+                   "N stream-out (stream 0) primitives (total)");
+      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
+                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
+                   "N stream-out (stream 1) primitives (total)");
+      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
+                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
+                   "N stream-out (stream 2) primitives (total)");
+      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
+                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
+                   "N stream-out (stream 3) primitives (total)");
+      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
+                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
+                   "N stream-out (stream 0) primitives (written)");
+      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
+                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
+                   "N stream-out (stream 1) primitives (written)");
+      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
+                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
+                   "N stream-out (stream 2) primitives (written)");
+      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
+                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
+                   "N stream-out (stream 3) primitives (written)");
+   }
+
+   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
+                      "N TCS shader invocations");
+   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
+                      "N TES shader invocations");
+
+   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
+                      "N geometry shader invocations");
+   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
+                      "N geometry shader primitives emitted");
+
+   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
+                      "N primitives entering clipping");
+   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
+                      "N primitives leaving clipping");
+
+   if (devinfo->is_haswell || devinfo->gen == 8) {
+      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
+                   "N fragment shader invocations",
+                   "N fragment shader invocations");
+   } else {
+      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
+                         "N fragment shader invocations");
+   }
+
+   add_basic_stat_reg(query, PS_DEPTH_COUNT,
+                      "N z-pass fragments");
+
+   if (devinfo->gen >= 7) {
+      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
+                         "N compute shader invocations");
+   }
+
+   query->data_size = sizeof(uint64_t) * query->n_counters;
+}
+
+static bool
+load_oa_metrics(struct gen_perf_config *perf, int fd,
                          const struct gen_device_info *devinfo)
 {
    perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
@@ -568,6 +858,62 @@ gen_perf_query_result_clear(struct gen_perf_query_result *result)
    result->hw_id = 0xffffffff; /* invalid */
 }
 
+static void
+gen_perf_query_register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
+                                              const struct gen_device_info *devinfo)
+{
+   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
+      return;
+
+   struct gen_perf_query_info *query =
+      append_query_info(perf_cfg, MAX_STAT_COUNTERS);
+
+   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
+   query->name = "Intel_Raw_Pipeline_Statistics_Query";
+
+   /* The order has to match mdapi_pipeline_metrics. */
+   add_basic_stat_reg(query, IA_VERTICES_COUNT,
+                      "N vertices submitted");
+   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
+                      "N primitives submitted");
+   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
+                      "N vertex shader invocations");
+   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
+                      "N geometry shader invocations");
+   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
+                      "N geometry shader primitives emitted");
+   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
+                      "N primitives entering clipping");
+   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
+                      "N primitives leaving clipping");
+   if (devinfo->is_haswell || devinfo->gen == 8) {
+      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
+                   "N fragment shader invocations",
+                   "N fragment shader invocations");
+   } else {
+      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
+                         "N fragment shader invocations");
+   }
+   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
+                      "N TCS shader invocations");
+   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
+                      "N TES shader invocations");
+   if (devinfo->gen >= 7) {
+      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
+                         "N compute shader invocations");
+   }
+
+   if (devinfo->gen >= 10) {
+      /* Reuse existing CS invocation register until we can expose this new
+       * one.
+       */
+      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
+                         "Reserved1");
+   }
+
+   query->data_size = sizeof(uint64_t) * query->n_counters;
+}
+
 static void
 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                               const char *name,
@@ -604,9 +950,9 @@ fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                                  sizeof(struct_name.field_name[0]),     \
                                  GEN_PERF_COUNTER_DATA_TYPE_##type_name)
 
-void
-gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
-                                       struct gen_perf_config *perf)
+static void
+register_mdapi_oa_query(const struct gen_device_info *devinfo,
+                        struct gen_perf_config *perf)
 {
    struct gen_perf_query_info *query = NULL;
 
@@ -618,7 +964,7 @@ gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
 
    switch (devinfo->gen) {
    case 7: {
-      query = gen_perf_query_append_query_info(perf, 1 + 45 + 16 + 7);
+      query = append_query_info(perf, 1 + 45 + 16 + 7);
       query->oa_format = I915_OA_FORMAT_A45_B8_C8;
 
       struct gen7_mdapi_metrics metric_data;
@@ -643,7 +989,7 @@ gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
       break;
    }
    case 8: {
-      query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16);
+      query = append_query_info(perf, 2 + 36 + 16 + 16);
       query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
 
       struct gen8_mdapi_metrics metric_data;
@@ -680,7 +1026,7 @@ gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
    case 9:
    case 10:
    case 11: {
-      query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
+      query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
       query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
 
       struct gen9_mdapi_metrics metric_data;
@@ -742,62 +1088,6 @@ gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
    }
 }
 
-void
-gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
-                                              struct gen_perf_config *perf)
-{
-   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
-      return;
-
-   struct gen_perf_query_info *query =
-      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
-
-   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
-   query->name = "Intel_Raw_Pipeline_Statistics_Query";
-
-   /* The order has to match mdapi_pipeline_metrics. */
-   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
-                                          "N vertices submitted");
-   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
-                                          "N primitives submitted");
-   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
-                                          "N vertex shader invocations");
-   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
-                                          "N geometry shader invocations");
-   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
-                                          "N geometry shader primitives emitted");
-   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
-                                          "N primitives entering clipping");
-   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
-                                          "N primitives leaving clipping");
-   if (devinfo->is_haswell || devinfo->gen == 8) {
-      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
-                                       "N fragment shader invocations",
-                                       "N fragment shader invocations");
-   } else {
-      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
-                                             "N fragment shader invocations");
-   }
-   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
-                                          "N TCS shader invocations");
-   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
-                                          "N TES shader invocations");
-   if (devinfo->gen >= 7) {
-      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
-                                             "N compute shader invocations");
-   }
-
-   if (devinfo->gen >= 10) {
-      /* Reuse existing CS invocation register until we can expose this new
-       * one.
-       */
-      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
-                                             "Reserved1");
-   }
-
-   query->data_size = sizeof(uint64_t) * query->n_counters;
-}
-
 uint64_t
 gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                              const struct gen_perf_query_info *query)
@@ -997,6 +1287,17 @@ gen_perf_dec_n_users(struct gen_perf_context *perf_ctx)
    }
 }
 
+void
+gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
+                      const struct gen_device_info *devinfo,
+                      int drm_fd)
+{
+   load_pipeline_statistic_metrics(perf_cfg, devinfo);
+   gen_perf_query_register_mdapi_statistic_query(perf_cfg, devinfo);
+   if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
+      register_mdapi_oa_query(devinfo, perf_cfg);
+}
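[Editor's note: for context, a driver calls this once at screen initialization with an open DRM fd. A minimal sketch, assuming gen_perf_new() from gen_perf.h; the screen-side names are hypothetical:

   /* Hypothetical driver-side setup: */
   struct gen_perf_config *perf_cfg = gen_perf_new(mem_ctx);

   /* Registers the pipeline statistics query, the raw MDAPI statistics
    * query and, when i915 perf is usable on this kernel, the OA metric
    * sets plus the MDAPI OA query.
    */
   gen_perf_init_metrics(perf_cfg, &screen->devinfo, screen->fd);
]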
+
 void
 gen_perf_init_context(struct gen_perf_context *perf_ctx,
                       struct gen_perf_config *perf_cfg,
@@ -1529,3 +1830,439 @@ gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
 
    return false;
 }
+
+/**
+ * Remove a query from the global list of unaccumulated queries once
+ * we've successfully accumulated the OA reports associated with the
+ * query in accumulate_oa_reports() or when discarding unwanted query
+ * results.
+ */
+static void
+drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
+                                   struct gen_perf_query_object *query)
+{
+   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
+      if (perf_ctx->unaccumulated[i] == query) {
+         int last_elt = --perf_ctx->unaccumulated_elements;
+
+         if (i == last_elt)
+            perf_ctx->unaccumulated[i] = NULL;
+         else {
+            perf_ctx->unaccumulated[i] =
+               perf_ctx->unaccumulated[last_elt];
+         }
+
+         break;
+      }
+   }
+
+   /* Drop our samples_head reference so that associated periodic
+    * sample data buffers can potentially be reaped if they aren't
+    * referenced by any other queries...
+    */
+
+   struct oa_sample_buf *buf =
+      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
+
+   assert(buf->refcount > 0);
+   buf->refcount--;
+
+   query->oa.samples_head = NULL;
+
+   gen_perf_reap_old_sample_buffers(perf_ctx);
+}
+
+/* In general, if we see anything spurious while accumulating results
+ * we don't try to continue accumulating the current query hoping for
+ * the best; we scrap anything outstanding and then hope for the best
+ * with new queries.
+ */
+static void
+discard_all_queries(struct gen_perf_context *perf_ctx)
+{
+   while (perf_ctx->unaccumulated_elements) {
+      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];
+
+      query->oa.results_accumulated = true;
+      drop_from_unaccumulated_query_list(perf_ctx, query);
+
+      gen_perf_dec_n_users(perf_ctx);
+   }
+}
+
+/**
+ * Accumulate raw OA counter values based on deltas between pairs of
+ * OA reports.
+ *
+ * Accumulation starts from the first report captured via
+ * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
+ * last MI_RPC report requested by brw_end_perf_query(). Between these
+ * two reports there may also some number of periodically sampled OA
+ * reports collected via the i915 perf interface - depending on the
+ * duration of the query.
+ *
+ * These periodic snapshots help to ensure we handle counter overflow
+ * correctly by being frequent enough to ensure we don't miss multiple
+ * overflows of a counter between snapshots. For Gen8+ the i915 perf
+ * snapshots provide the extra context-switch reports that let us
+ * subtract out the progress of counters associated with other
+ * contexts running on the system.
+ */
+static void
+accumulate_oa_reports(struct gen_perf_context *perf_ctx,
+                      struct gen_perf_query_object *query)
+{
+   const struct gen_device_info *devinfo = perf_ctx->devinfo;
+   uint32_t *start;
+   uint32_t *last;
+   uint32_t *end;
+   struct exec_node *first_samples_node;
+   bool in_ctx = true;
+   int out_duration = 0;
+
+   assert(query->oa.map != NULL);
+
+   start = last = query->oa.map;
+   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
+
+   if (start[0] != query->oa.begin_report_id) {
+      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
+      goto error;
+   }
+   if (end[0] != (query->oa.begin_report_id + 1)) {
+      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
+      goto error;
+   }
+
+   /* See if we have any periodic reports to accumulate too... */
+
+   /* N.B. The oa.samples_head was set when the query began and
+    * pointed to the tail of the perf_ctx->sample_buffers list at
+    * the time the query started. Since that buffer existed before the
+    * first MI_REPORT_PERF_COUNT command was emitted, we know that no
+    * data in this particular node's buffer can possibly be associated
+    * with the query - so skip ahead one...
+    */
+   first_samples_node = query->oa.samples_head->next;
+
+   foreach_list_typed_from(struct oa_sample_buf, buf, link,
+                           &perf_ctx->sample_buffers,
+                           first_samples_node)
+   {
+      int offset = 0;
+
+      while (offset < buf->len) {
+         const struct drm_i915_perf_record_header *header =
+            (const struct drm_i915_perf_record_header *)(buf->buf + offset);
+
+         assert(header->size != 0);
+         assert(header->size <= buf->len);
+
+         offset += header->size;
+
+         switch (header->type) {
+         case DRM_I915_PERF_RECORD_SAMPLE: {
+            uint32_t *report = (uint32_t *)(header + 1);
+            bool add = true;
+
+            /* Ignore reports that come before the start marker.
+             * (Note: takes care to allow overflow of 32bit timestamps)
+             */
+            if (gen_device_info_timebase_scale(devinfo,
+                                               report[1] - start[1]) > 5000000000) {
+               continue;
+            }
+
+            /* Ignore reports that come after the end marker.
+             * (Note: takes care to allow overflow of 32bit timestamps)
+             */
+            if (gen_device_info_timebase_scale(devinfo,
+                                               report[1] - end[1]) <= 5000000000) {
+               goto end;
+            }
+
+            /* For Gen8+ since the counters continue while other
+             * contexts are running we need to discount any unrelated
+             * deltas. The hardware automatically generates a report
+             * on context switch which gives us a new reference point
+             * to continue adding deltas from.
+             *
+             * For Haswell we can rely on the HW to stop the progress
+             * of OA counters while any other context is active.
+             */
+            if (devinfo->gen >= 8) {
+               if (in_ctx && report[2] != query->oa.result.hw_id) {
+                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
+                  in_ctx = false;
+                  out_duration = 0;
+               } else if (in_ctx == false && report[2] == query->oa.result.hw_id) {
+                  DBG("i915 perf: Switch TO\n");
+                  in_ctx = true;
+
+                  /* From experimentation in IGT, we found that the OA unit
+                   * might label some report as "idle" (using an invalid
+                   * context ID), right after a report for a given context.
+                   * Deltas generated by those reports actually belong to the
+                   * previous context, even though they're not labelled as
+                   * such.
+                   *
+                   * We didn't *really* Switch AWAY in the case that we e.g.
+                   * saw a single periodic report while idle...
+                   */
+                  if (out_duration >= 1)
+                     add = false;
+               } else if (in_ctx) {
+                  assert(report[2] == query->oa.result.hw_id);
+                  DBG("i915 perf: Continuation IN\n");
+               } else {
+                  assert(report[2] != query->oa.result.hw_id);
+                  DBG("i915 perf: Continuation OUT\n");
+                  add = false;
+                  out_duration++;
+               }
+            }
+
+            if (add) {
+               gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
+                                                last, report);
+            }
+
+            last = report;
+
+            break;
+         }
+
+         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
+             DBG("i915 perf: OA error: all reports lost\n");
+             goto error;
+         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
+             DBG("i915 perf: OA report lost\n");
+             break;
+         }
+      }
+   }
+
+end:
+
+   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
+                                    last, end);
+
+   query->oa.results_accumulated = true;
+   drop_from_unaccumulated_query_list(perf_ctx, query);
+   gen_perf_dec_n_users(perf_ctx);
+
+   return;
+
+error:
+
+   discard_all_queries(perf_ctx);
+}
+
+void
+gen_perf_delete_query(struct gen_perf_context *perf_ctx,
+                      struct gen_perf_query_object *query)
+{
+   struct gen_perf_config *perf_cfg = perf_ctx->perf;
+
+   /* We can assume that the frontend waits for a query to complete
+    * before ever calling into here, so we don't have to worry about
+    * deleting an in-flight query object.
+    */
+   switch (query->queryinfo->kind) {
+   case GEN_PERF_QUERY_TYPE_OA:
+   case GEN_PERF_QUERY_TYPE_RAW:
+      if (query->oa.bo) {
+         if (!query->oa.results_accumulated) {
+            drop_from_unaccumulated_query_list(perf_ctx, query);
+            gen_perf_dec_n_users(perf_ctx);
+         }
+
+         perf_cfg->vtbl.bo_unreference(query->oa.bo);
+         query->oa.bo = NULL;
+      }
+
+      query->oa.results_accumulated = false;
+      break;
+
+   case GEN_PERF_QUERY_TYPE_PIPELINE:
+      if (query->pipeline_stats.bo) {
+         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
+         query->pipeline_stats.bo = NULL;
+      }
+      break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
+   }
+
+   /* As an indication that the INTEL_performance_query extension is no
+    * longer in use, it's a good time to free our cache of sample
+    * buffers and close any current i915-perf stream.
+    */
+   if (--perf_ctx->n_query_instances == 0) {
+      gen_perf_free_sample_bufs(perf_ctx);
+      gen_perf_close(perf_ctx, query->queryinfo);
+   }
+
+   free(query);
+}
+
+#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
+
+static void
+read_gt_frequency(struct gen_perf_context *perf_ctx,
+                  struct gen_perf_query_object *obj)
+{
+   const struct gen_device_info *devinfo = perf_ctx->devinfo;
+   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
+      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
+
+   switch (devinfo->gen) {
+   case 7:
+   case 8:
+      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
+      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
+      break;
+   case 9:
+   case 10:
+   case 11:
+      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
+      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
+      break;
+   default:
+      unreachable("unexpected gen");
+   }
+
+   /* Put the numbers into Hz. */
+   obj->oa.gt_frequency[0] *= 1000000ULL;
+   obj->oa.gt_frequency[1] *= 1000000ULL;
+}
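[Editor's note: as a sanity check on the unit conversion (a worked example, not code from the change) - the gen9+ field counts in 50/3 MHz (~16.66 MHz) units, the gen7/8 field in 50 MHz units:

   /* gen9+:  raw field 27 -> 27 * 50 / 3 = 450  -> 450000000 Hz
    * gen7/8: raw field 24 -> 24 * 50     = 1200 -> 1200000000 Hz
    */
]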
+
+static int
+get_oa_counter_data(struct gen_perf_context *perf_ctx,
+                    struct gen_perf_query_object *query,
+                    size_t data_size,
+                    uint8_t *data)
+{
+   struct gen_perf_config *perf_cfg = perf_ctx->perf;
+   const struct gen_perf_query_info *queryinfo = query->queryinfo;
+   int n_counters = queryinfo->n_counters;
+   int written = 0;
+
+   for (int i = 0; i < n_counters; i++) {
+      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
+      uint64_t *out_uint64;
+      float *out_float;
+      size_t counter_size = gen_perf_query_counter_get_size(counter);
+
+      if (counter_size) {
+         switch (counter->data_type) {
+         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
+            out_uint64 = (uint64_t *)(data + counter->offset);
+            *out_uint64 =
+               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
+                                               query->oa.result.accumulator);
+            break;
+         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
+            out_float = (float *)(data + counter->offset);
+            *out_float =
+               counter->oa_counter_read_float(perf_cfg, queryinfo,
+                                              query->oa.result.accumulator);
+            break;
+         default:
+            /* So far we aren't using uint32, double or bool32... */
+            unreachable("unexpected counter data type");
+         }
+         written = counter->offset + counter_size;
+      }
+   }
+
+   return written;
+}
+
+static int
+get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
+                        struct gen_perf_query_object *query,
+                        size_t data_size,
+                        uint8_t *data)
+{
+   struct gen_perf_config *perf_cfg = perf_ctx->perf;
+   const struct gen_perf_query_info *queryinfo = query->queryinfo;
+   int n_counters = queryinfo->n_counters;
+   uint8_t *p = data;
+
+   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
+   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
+
+   for (int i = 0; i < n_counters; i++) {
+      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
+      uint64_t value = end[i] - start[i];
+
+      if (counter->pipeline_stat.numerator !=
+          counter->pipeline_stat.denominator) {
+         value *= counter->pipeline_stat.numerator;
+         value /= counter->pipeline_stat.denominator;
+      }
+
+      *((uint64_t *)p) = value;
+      p += 8;
+   }
+
+   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
+
+   return p - data;
+}
+
+void
+gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
+                        struct gen_perf_query_object *query,
+                        int data_size,
+                        unsigned *data,
+                        unsigned *bytes_written)
+{
+   struct gen_perf_config *perf_cfg = perf_ctx->perf;
+   int written = 0;
+
+   switch (query->queryinfo->kind) {
+   case GEN_PERF_QUERY_TYPE_OA:
+   case GEN_PERF_QUERY_TYPE_RAW:
+      if (!query->oa.results_accumulated) {
+         read_gt_frequency(perf_ctx, query);
+         uint32_t *begin_report = query->oa.map;
+         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
+         gen_perf_query_result_read_frequencies(&query->oa.result,
+                                                perf_ctx->devinfo,
+                                                begin_report,
+                                                end_report);
+         accumulate_oa_reports(perf_ctx, query);
+         assert(query->oa.results_accumulated);
+
+         perf_cfg->vtbl.bo_unmap(query->oa.bo);
+         query->oa.map = NULL;
+      }
+      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
+         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
+      } else {
+         const struct gen_device_info *devinfo = perf_ctx->devinfo;
+
+         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
+                                                     devinfo, &query->oa.result,
+                                                     query->oa.gt_frequency[0],
+                                                     query->oa.gt_frequency[1]);
+      }
+      break;
+
+   case GEN_PERF_QUERY_TYPE_PIPELINE:
+      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
+      break;
+
+   default:
+      unreachable("Unknown query type");
+      break;
+   }
+
+   if (bytes_written)
+      *bytes_written = written;
+}
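[Editor's note: for reference, the frontend flow once a query has ended, sketched under assumptions (results storage and the batch handle are the caller's; gen_perf_is_query_ready() is the readiness check seen earlier in this file):

   unsigned results[256];   /* hypothetical storage, >= queryinfo->data_size */
   unsigned bytes_written;

   if (gen_perf_is_query_ready(perf_ctx, query, current_batch)) {
      gen_perf_get_query_data(perf_ctx, query,
                              query->queryinfo->data_size,
                              results, &bytes_written);
   }
]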