switch (type) {
case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
+ case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
+ case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
+ case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
}
}
-static boolean r600_query_sw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+static bool r600_query_sw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
break;
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
query->begin_result = 0;
break;
case R600_QUERY_BUFFER_WAIT_TIME:
case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
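+ /* These are winsys counters: sample the current value here, and report end_result - begin_result as the query result. */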
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
unreachable("r600_query_sw_begin: bad query type");
}
- return TRUE;
+ return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
case PIPE_QUERY_TIMESTAMP_DISJOINT:
break;
case PIPE_QUERY_GPU_FINISHED:
- rctx->b.flush(&rctx->b, &query->fence, 0);
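+ /* A deferred flush creates the fence but lets the driver postpone the actual submission until the fence is waited on. */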
+ rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
break;
case R600_QUERY_DRAW_CALLS:
query->end_result = rctx->num_draw_calls;
break;
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_MCLK:
case R600_QUERY_BUFFER_WAIT_TIME:
case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
case R600_QUERY_NUM_SHADERS_CREATED:
query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
break;
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
+ query->end_result = rctx->last_tex_ps_draw_ratio;
+ break;
case R600_QUERY_GPIN_ASIC_ID:
case R600_QUERY_GPIN_NUM_SIMD:
case R600_QUERY_GPIN_NUM_RB:
return true;
}
-static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait,
- union pipe_query_result *result)
+static bool r600_query_sw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ union pipe_query_result *result)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
/* Convert from cycles per millisecond to cycles per second (Hz). */
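/* clock_crystal_freq is in kHz (cycles per millisecond), e.g. a 27000 kHz crystal yields 27000 * 1000 = 27 MHz here. */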
result->timestamp_disjoint.frequency =
(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
- result->timestamp_disjoint.disjoint = FALSE;
- return TRUE;
+ result->timestamp_disjoint.disjoint = false;
+ return true;
case PIPE_QUERY_GPU_FINISHED: {
struct pipe_screen *screen = rctx->b.screen;
- result->b = screen->fence_finish(screen, query->fence,
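+ /* fence_finish now takes the context as well, so that a deferred flush can be completed when the fence is waited on. */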
+ result->b = screen->fence_finish(screen, &rctx->b, query->fence,
wait ? PIPE_TIMEOUT_INFINITE : 0);
return result->b;
}
case R600_QUERY_GPIN_ASIC_ID:
result->u32 = 0;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SIMD:
result->u32 = rctx->screen->info.num_good_compute_units;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_RB:
result->u32 = rctx->screen->info.num_render_backends;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SPI:
result->u32 = 1; /* all supported chips have one SPI per SE */
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SE:
result->u32 = rctx->screen->info.max_se;
- return TRUE;
+ return true;
}
result->u64 = query->end_result - query->begin_result;
break;
}
- return TRUE;
+ return true;
}
static struct r600_query_ops sw_query_ops = {
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
FREE(rquery);
}
if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
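/* Predication has the hardware read results straight from this buffer, so it must be prepared (presumably cleared) before first use. */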
if (!query->ops->prepare_buffer(ctx, query, buf)) {
- pipe_resource_reference((struct pipe_resource **)&buf, NULL);
+ r600_resource_reference(&buf, NULL);
return NULL;
}
}
.add_result = r600_query_hw_add_result,
};
-boolean r600_query_hw_init(struct r600_common_context *rctx,
- struct r600_query_hw *query)
+bool r600_query_hw_init(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
query->buffer.buf = r600_new_query_buffer(rctx, query);
if (!query->buffer.buf)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
r600_update_prims_generated_query_state(ctx, query->b.type, 1);
ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
- TRUE);
+ true);
/* Get a new query buffer if needed. */
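/* Each begin/end emit appends result_size bytes; once the current buffer is full, a fresh one is chained in through buffer.previous. */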
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
/* The queries which need begin already called this in begin_query. */
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
}
/* emit end query */
return rquery->ops->begin(rctx, rquery);
}
-static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
- struct r600_query_hw *query)
+void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
struct r600_query_buffer *prev = query->buffer.previous;
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
/* Obtain a new buffer if the current one can't be mapped without a stall. */
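/* buffer_wait with a timeout of 0 is only a poll: it returns false immediately if the buffer is still busy on the GPU. */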
if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(rctx, query);
} else {
if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
}
}
}
-boolean r600_query_hw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+bool r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
return false;
}
- r600_query_hw_reset_buffers(rctx, query);
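+ /* Queries flagged BEGIN_RESUMES keep the results accumulated so far; only queries that start from scratch get their buffers reset. */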
+ if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
+ r600_query_hw_reset_buffers(rctx, query);
r600_query_hw_emit_start(rctx, query);
if (!query->buffer.buf)
util_query_clear_result(result, query->b.type);
}
-boolean r600_query_hw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait, union pipe_query_result *result)
+bool r600_query_hw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait, union pipe_query_result *result)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
if (!map)
- return FALSE;
+ return false;
while (results_base != qbuf->results_end) {
query->ops->add_result(rctx, query, map + results_base,
rquery->type == PIPE_QUERY_TIMESTAMP) {
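/* result->u64 is in crystal clock ticks and clock_crystal_freq is in kHz, so ticks * 1000000 / kHz gives nanoseconds. */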
result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
}
- return TRUE;
+ return true;
}
static void r600_render_condition(struct pipe_context *ctx,
assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
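/* A flush between the emits below would suspend the queries that were just resumed. */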
- ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
+ ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_start(ctx, query);
}
}
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
+ r600_resource_reference(&buffer, NULL);
if (mask != 0) {
ctx->backend_mask = mask;
X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
+ X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
+ X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
+ X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
+ X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
* which use it as a fallback path to detect the GPU type.
switch (info->query_type) {
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_VRAM_USAGE:
+ case R600_QUERY_MAPPED_VRAM:
info->max_value.u64 = rscreen->info.vram_size;
break;
case R600_QUERY_REQUESTED_GTT:
case R600_QUERY_GTT_USAGE:
+ case R600_QUERY_MAPPED_GTT:
info->max_value.u64 = rscreen->info.gart_size;
break;
case R600_QUERY_GPU_TEMPERATURE: