#include "brw_context.h"
#include "brw_defines.h"
-#include "brw_performance_query.h"
#include "intel_batchbuffer.h"
#include "perf/gen_perf.h"
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_GO_TRANSITION (1<<4)
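+/* Thin i965 wrapper coupling the GL query object to the hardware-agnostic
+ * gen_perf state. The OA and pipeline-statistics fields that previously
+ * lived here (see the deleted brw_performance_query.h below) now live in
+ * struct gen_perf_query_object from perf/gen_perf.h.
+ */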
+struct brw_perf_query_object {
+ struct gl_perf_query_object base;
+ struct gen_perf_query_object *query;
+};
+
/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
struct gl_context *ctx = brw_void;
struct gl_perf_query_object *o = query_void;
- struct brw_perf_query_object *obj = query_void;
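+ /* Hooks now reach the hardware query state in two steps: downcast the
+  * GL object to the brw wrapper, then follow its ->query pointer. */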
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
*/
static void
snapshot_statistics_registers(struct brw_context *brw,
- struct brw_perf_query_object *obj,
+ struct gen_perf_query_object *obj,
uint32_t offset_in_bytes)
{
- const struct gen_perf_query_info *query = obj->query;
+ const struct gen_perf_query_info *query = obj->queryinfo;
const int n_counters = query->n_counters;
for (int i = 0; i < n_counters; i++) {
*/
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct gen_perf_query_object *obj)
{
if (brw->perf_ctx.unaccumulated_elements >=
brw->perf_ctx.unaccumulated_array_size)
brw->perf_ctx.unaccumulated_array_size *= 1.5;
brw->perf_ctx.unaccumulated =
reralloc(brw, brw->perf_ctx.unaccumulated,
- struct brw_perf_query_object *,
+ struct gen_perf_query_object *,
brw->perf_ctx.unaccumulated_array_size);
}
*/
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct gen_perf_query_object *obj)
{
for (int i = 0; i < brw->perf_ctx.unaccumulated_elements; i++) {
if (brw->perf_ctx.unaccumulated[i] == obj) {
discard_all_queries(struct brw_context *brw)
{
while (brw->perf_ctx.unaccumulated_elements) {
- struct brw_perf_query_object *obj = brw->perf_ctx.unaccumulated[0];
+ struct gen_perf_query_object *obj = brw->perf_ctx.unaccumulated[0];
obj->oa.results_accumulated = true;
drop_from_unaccumulated_query_list(brw, brw->perf_ctx.unaccumulated[0]);
*/
static bool
read_oa_samples_for_query(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct gen_perf_query_object *obj)
{
uint32_t *start;
uint32_t *last;
*/
static void
accumulate_oa_reports(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct brw_perf_query_object *brw_query)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gl_perf_query_object *o = &obj->base;
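+ /* Ready and Id still come from the GL object, so this function keeps the
+  * brw wrapper as its argument and unwraps the gen_perf object here. */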
+ struct gen_perf_query_object *obj = brw_query->query;
uint32_t *start;
uint32_t *last;
uint32_t *end;
bool in_ctx = true;
int out_duration = 0;
- assert(o->Ready);
+ assert(brw_query->base.Ready);
assert(obj->oa.map != NULL);
start = last = obj->oa.map;
}
if (add) {
- gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
+ gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
last, report);
}
end:
- gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
+ gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
last, end);
- DBG("Marking %d accumulated - results gathered\n", o->Id);
+ DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id);
obj->oa.results_accumulated = true;
drop_from_unaccumulated_query_list(brw, obj);
struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
- const struct gen_perf_query_info *query = obj->query;
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
+ const struct gen_perf_query_info *query = obj->queryinfo;
struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
/* We can assume the frontend hides mistaken attempts to Begin a
/* Take a starting OA counter snapshot. */
brw->perf_ctx.perf->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
- obj->oa.begin_report_id);
+ obj->oa.begin_report_id);
perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
MI_FREQ_START_OFFSET_BYTES);
struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
DBG("End(%d)\n", o->Id);
*/
brw_emit_mi_flush(brw);
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
struct brw_bo *bo = NULL;
struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
assert(!o->Ready);
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
bo = obj->oa.bo;
* we need to wait for all the reports to come in before we can
* read them.
*/
- if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA ||
- obj->query->kind == GEN_PERF_QUERY_TYPE_RAW) {
+ if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
+ obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
while (!read_oa_samples_for_query(brw, obj))
;
}
struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
if (o->Ready)
return true;
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
return (obj->oa.results_accumulated ||
static void
read_slice_unslice_frequencies(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct gen_perf_query_object *obj)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
uint32_t *begin_report = obj->oa.map, *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
static void
read_gt_frequency(struct brw_context *brw,
- struct brw_perf_query_object *obj)
+ struct gen_perf_query_object *obj)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
static int
get_oa_counter_data(struct brw_context *brw,
- struct brw_perf_query_object *obj,
+ struct gen_perf_query_object *obj,
size_t data_size,
uint8_t *data)
{
struct gen_perf_config *perf = brw->perf_ctx.perf;
- const struct gen_perf_query_info *query = obj->query;
+ const struct gen_perf_query_info *query = obj->queryinfo;
int n_counters = query->n_counters;
int written = 0;
static int
get_pipeline_stats_data(struct brw_context *brw,
- struct brw_perf_query_object *obj,
+ struct gen_perf_query_object *obj,
size_t data_size,
uint8_t *data)
{
- const struct gen_perf_query_info *query = obj->query;
- int n_counters = obj->query->n_counters;
+ const struct gen_perf_query_info *query = obj->queryinfo;
+ int n_counters = obj->queryinfo->n_counters;
uint8_t *p = data;
uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
GLuint *bytes_written)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
int written = 0;
assert(brw_is_perf_query_ready(ctx, o));
*/
assert(o->Ready);
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
if (!obj->oa.results_accumulated) {
read_gt_frequency(brw, obj);
read_slice_unslice_frequencies(brw, obj);
- accumulate_oa_reports(brw, obj);
+ accumulate_oa_reports(brw, brw_query);
assert(obj->oa.results_accumulated);
brw_bo_unmap(obj->oa.bo);
obj->oa.map = NULL;
}
- if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA) {
+ if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
} else {
const struct gen_device_info *devinfo = &brw->screen->devinfo;
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
struct brw_context *brw = brw_context(ctx);
- const struct gen_perf_query_info *query =
+ const struct gen_perf_query_info *queryinfo =
&brw->perf_ctx.perf->queries[query_index];
- struct brw_perf_query_object *obj =
- calloc(1, sizeof(struct brw_perf_query_object));
+ struct gen_perf_query_object *obj =
+ calloc(1, sizeof(struct gen_perf_query_object));
if (!obj)
return NULL;
- obj->query = query;
+ obj->queryinfo = queryinfo;
brw->perf_ctx.n_query_instances++;
- return &obj->base;
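+ /* The wrapper only couples the GL object to the gen_perf state; both
+  * halves are freed together in brw_delete_perf_query(). */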
+ struct brw_perf_query_object *brw_query =
+    calloc(1, sizeof(struct brw_perf_query_object));
+ if (unlikely(!brw_query)) {
+    /* Don't leak obj or skew n_query_instances on failure. */
+    --brw->perf_ctx.n_query_instances;
+    free(obj);
+    return NULL;
+ }
+ brw_query->query = obj;
+ return &brw_query->base;
}
/**
struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct brw_perf_query_object *obj = brw_perf_query(o);
struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
+ struct brw_perf_query_object *brw_query = brw_perf_query(o);
+ struct gen_perf_query_object *obj = brw_query->query;
/* We can assume that the frontend waits for a query to complete
* before ever calling into here, so we don't have to worry about
DBG("Delete(%d)\n", o->Id);
- switch (obj->query->kind) {
+ switch (obj->queryinfo->kind) {
case GEN_PERF_QUERY_TYPE_OA:
case GEN_PERF_QUERY_TYPE_RAW:
if (obj->oa.bo) {
*/
if (--brw->perf_ctx.n_query_instances == 0) {
gen_perf_free_sample_bufs(&brw->perf_ctx);
- close_perf(brw, obj->query);
+ close_perf(brw, obj->queryinfo);
}
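+ /* Release both halves allocated in brw_new_perf_query_object(). */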
free(obj);
+ free(brw_query);
}
/******************************************************************************/
brw->perf_ctx.perf);
brw->perf_ctx.unaccumulated =
- ralloc_array(brw, struct brw_perf_query_object *, 2);
+ ralloc_array(brw, struct gen_perf_query_object *, 2);
brw->perf_ctx.unaccumulated_elements = 0;
brw->perf_ctx.unaccumulated_array_size = 2;
--- a/src/mesa/drivers/dri/i965/brw_performance_query.h
+++ /dev/null
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef BRW_PERFORMANCE_QUERY_H
-#define BRW_PERFORMANCE_QUERY_H
-
-#include <stdint.h>
-
-#include "brw_context.h"
-
-#include "perf/gen_perf.h"
-
-struct gen_perf_query_info;
-
-/**
- * i965 representation of a performance query object.
- *
- * NB: We want to keep this structure relatively lean considering that
- * applications may expect to allocate enough objects to be able to
- * query around all draw calls in a frame.
- */
-struct brw_perf_query_object
-{
- struct gl_perf_query_object base;
-
- const struct gen_perf_query_info *query;
-
- /* See query->kind to know which state below is in use... */
- union {
- struct {
-
- /**
- * BO containing OA counter snapshots at query Begin/End time.
- */
- struct brw_bo *bo;
-
- /**
- * Address of mapped of @bo
- */
- void *map;
-
- /**
- * The MI_REPORT_PERF_COUNT command lets us specify a unique
- * ID that will be reflected in the resulting OA report
- * that's written by the GPU. This is the ID we're expecting
- * in the begin report and the the end report should be
- * @begin_report_id + 1.
- */
- int begin_report_id;
-
- /**
- * Reference the head of the brw->perfquery.sample_buffers
- * list at the time that the query started (so we only need
- * to look at nodes after this point when looking for samples
- * related to this query)
- *
- * (See struct brw_oa_sample_buf description for more details)
- */
- struct exec_node *samples_head;
-
- /**
- * false while in the unaccumulated_elements list, and set to
- * true when the final, end MI_RPC snapshot has been
- * accumulated.
- */
- bool results_accumulated;
-
- /**
- * Frequency of the GT at begin and end of the query.
- */
- uint64_t gt_frequency[2];
-
- /**
- * Accumulated OA results between begin and end of the query.
- */
- struct gen_perf_query_result result;
- } oa;
-
- struct {
- /**
- * BO containing starting and ending snapshots for the
- * statistics counters.
- */
- struct brw_bo *bo;
- } pipeline_stats;
- };
-};
-
-
-#endif /* BRW_PERFORMANCE_QUERY_H */