switch (type) {
case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
+ case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
+ case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
+ case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
default: unreachable("query type does not correspond to winsys id");
}
}
-static boolean r600_query_sw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+static bool r600_query_sw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case R600_QUERY_DRAW_CALLS:
query->begin_result = rctx->num_draw_calls;
break;
+ case R600_QUERY_SPILL_DRAW_CALLS:
+ query->begin_result = rctx->num_spill_draw_calls;
+ break;
+ case R600_QUERY_COMPUTE_CALLS:
+ query->begin_result = rctx->num_compute_calls;
+ break;
+ case R600_QUERY_SPILL_COMPUTE_CALLS:
+ query->begin_result = rctx->num_spill_compute_calls;
+ break;
+ case R600_QUERY_DMA_CALLS:
+ query->begin_result = rctx->num_dma_calls;
+ break;
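+ /* The following queries report a current value, not a delta:
+ * begin is zeroed, so the result is end_result alone. */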
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
query->begin_result = 0;
break;
case R600_QUERY_BUFFER_WAIT_TIME:
case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
case R600_QUERY_NUM_SHADERS_CREATED:
query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
break;
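+ /* GPIN queries return constant chip information; nothing to snapshot. */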
+ case R600_QUERY_GPIN_ASIC_ID:
+ case R600_QUERY_GPIN_NUM_SIMD:
+ case R600_QUERY_GPIN_NUM_RB:
+ case R600_QUERY_GPIN_NUM_SPI:
+ case R600_QUERY_GPIN_NUM_SE:
+ break;
default:
unreachable("r600_query_sw_begin: bad query type");
}
- return TRUE;
+ return true;
}
-static void r600_query_sw_end(struct r600_common_context *rctx,
+static bool r600_query_sw_end(struct r600_common_context *rctx,
struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
break;
case PIPE_QUERY_GPU_FINISHED:
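+ /* A deferred flush creates the fence but lets the driver postpone the
+ * actual CS submission until the fence is waited on in get_result. */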
- rctx->b.flush(&rctx->b, &query->fence, 0);
+ rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
break;
case R600_QUERY_DRAW_CALLS:
- query->begin_result = rctx->num_draw_calls;
+ query->end_result = rctx->num_draw_calls;
+ break;
+ case R600_QUERY_SPILL_DRAW_CALLS:
+ query->end_result = rctx->num_spill_draw_calls;
+ break;
+ case R600_QUERY_COMPUTE_CALLS:
+ query->end_result = rctx->num_compute_calls;
+ break;
+ case R600_QUERY_SPILL_COMPUTE_CALLS:
+ query->end_result = rctx->num_spill_compute_calls;
+ break;
+ case R600_QUERY_DMA_CALLS:
+ query->end_result = rctx->num_dma_calls;
break;
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
case R600_QUERY_BUFFER_WAIT_TIME:
case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
query->begin_result = 0;
break;
case R600_QUERY_NUM_COMPILATIONS:
- query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
+ query->end_result = p_atomic_read(&rctx->screen->num_compilations);
break;
case R600_QUERY_NUM_SHADERS_CREATED:
- query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
+ query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
+ break;
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
+ query->end_result = rctx->last_tex_ps_draw_ratio;
+ break;
+ case R600_QUERY_GPIN_ASIC_ID:
+ case R600_QUERY_GPIN_NUM_SIMD:
+ case R600_QUERY_GPIN_NUM_RB:
+ case R600_QUERY_GPIN_NUM_SPI:
+ case R600_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("r600_query_sw_end: bad query type");
}
+
+ return true;
}
-static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait,
- union pipe_query_result *result)
+static bool r600_query_sw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ union pipe_query_result *result)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
/* Convert from cycles per millisecond to cycles per second (Hz). */
result->timestamp_disjoint.frequency =
- (uint64_t)rctx->screen->info.r600_clock_crystal_freq * 1000;
- result->timestamp_disjoint.disjoint = FALSE;
- return TRUE;
+ (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
+ result->timestamp_disjoint.disjoint = false;
+ return true;
case PIPE_QUERY_GPU_FINISHED: {
struct pipe_screen *screen = rctx->b.screen;
- result->b = screen->fence_finish(screen, query->fence,
+ result->b = screen->fence_finish(screen, &rctx->b, query->fence,
wait ? PIPE_TIMEOUT_INFINITE : 0);
return result->b;
}
+
+ case R600_QUERY_GPIN_ASIC_ID:
+ result->u32 = 0;
+ return true;
+ case R600_QUERY_GPIN_NUM_SIMD:
+ result->u32 = rctx->screen->info.num_good_compute_units;
+ return true;
+ case R600_QUERY_GPIN_NUM_RB:
+ result->u32 = rctx->screen->info.num_render_backends;
+ return true;
+ case R600_QUERY_GPIN_NUM_SPI:
+ result->u32 = 1; /* all supported chips have one SPI per SE */
+ return true;
+ case R600_QUERY_GPIN_NUM_SE:
+ result->u32 = rctx->screen->info.max_se;
+ return true;
}
result->u64 = query->end_result - query->begin_result;
break;
}
- return TRUE;
+ return true;
}
static struct r600_query_ops sw_query_ops = {
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query)
{
- unsigned buf_size = MAX2(query->result_size, 4096);
+ unsigned buf_size = MAX2(query->result_size,
+ ctx->screen->info.gart_page_size);
/* Queries are normally read by the CPU after
* being written by the GPU, hence staging is probably a good
* usage pattern.
*/
struct r600_resource *buf = (struct r600_resource*)
pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, buf_size);
+ if (!buf)
+ return NULL;
- if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
- query->ops->prepare_buffer(ctx, query, buf);
+ if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
+ if (!query->ops->prepare_buffer(ctx, query, buf)) {
+ r600_resource_reference(&buf, NULL);
+ return NULL;
+ }
+ }
return buf;
}
-static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
+static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
- uint32_t *results = ctx->ws->buffer_map(buffer->cs_buf, NULL,
+ uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED);
+ if (!results)
+ return false;
memset(results, 0, buffer->b.b.width0);
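+ /* For occlusion queries, mark the result slots of unused render
+ * backends as written (top bit set) so results aren't waited on. */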
results += 4 * ctx->max_db;
}
}
+
+ return true;
}
static struct r600_query_ops query_hw_ops = {
.add_result = r600_query_hw_add_result,
};
-boolean r600_query_hw_init(struct r600_common_context *rctx,
- struct r600_query_hw *query)
+bool r600_query_hw_init(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
query->buffer.buf = r600_new_query_buffer(rctx, query);
if (!query->buffer.buf)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
query->result_size = 16;
query->num_cs_dw_begin = 8;
query->num_cs_dw_end = 8;
- query->flags = R600_QUERY_HW_FLAG_TIMER;
break;
case PIPE_QUERY_TIMESTAMP:
query->result_size = 8;
query->num_cs_dw_end = 8;
- query->flags = R600_QUERY_HW_FLAG_TIMER |
- R600_QUERY_HW_FLAG_NO_START;
+ query->flags = R600_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
type == PIPE_QUERY_OCCLUSION_PREDICATE) {
bool old_enable = rctx->num_occlusion_queries != 0;
- bool enable;
+ bool old_perfect_enable =
+ rctx->num_perfect_occlusion_queries != 0;
+ bool enable, perfect_enable;
rctx->num_occlusion_queries += diff;
assert(rctx->num_occlusion_queries >= 0);
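+ /* Only occlusion counters need perfect (precise) zpass counts;
+ * predicates can use the faster, possibly imprecise counting mode. */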
+ if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
+ rctx->num_perfect_occlusion_queries += diff;
+ assert(rctx->num_perfect_occlusion_queries >= 0);
+ }
+
enable = rctx->num_occlusion_queries != 0;
+ perfect_enable = rctx->num_perfect_occlusion_queries != 0;
- if (enable != old_enable) {
+ if (enable != old_enable || perfect_enable != old_perfect_enable) {
rctx->set_occlusion_query_state(&rctx->b, enable);
}
}
break;
case PIPE_QUERY_TIME_ELAPSED:
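+ /* EVENT_WRITE_EOP: DATA_SEL=3 (the 3 << 29 below) tells the CP to
+ * write the 64-bit GPU clock counter to the given address. */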
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
- radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
radeon_emit(cs, va);
radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
radeon_emit(cs, 0);
{
uint64_t va;
+ if (!query->buffer.buf)
+ return; // previous buffer allocation failure
+
r600_update_occlusion_query_state(ctx, query->b.type, 1);
r600_update_prims_generated_query_state(ctx, query->b.type, 1);
ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
- TRUE);
+ true);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
*qbuf = query->buffer;
- query->buffer.buf = r600_new_query_buffer(ctx, query);
query->buffer.results_end = 0;
query->buffer.previous = qbuf;
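+ /* Allocate last: on failure, buf stays NULL and begin/end report
+ * the query as failed via the NULL checks. */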
+ query->buffer.buf = r600_new_query_buffer(ctx, query);
+ if (!query->buffer.buf)
+ return;
}
/* emit begin query */
query->ops->emit_start(ctx, query, query->buffer.buf, va);
- if (query->flags & R600_QUERY_HW_FLAG_TIMER)
- ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw_end;
- else
- ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw_end;
+ ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
/* fall through */
case PIPE_QUERY_TIMESTAMP:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
- radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
radeon_emit(cs, va);
radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
radeon_emit(cs, 0);
{
uint64_t va;
+ if (!query->buffer.buf)
+ return; // previous buffer allocation failure
+
/* Queries with a begin already called need_gfx_cs_space in begin_query. */
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
}
/* emit end query */
query->buffer.results_end += query->result_size;
- if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
- if (query->flags & R600_QUERY_HW_FLAG_TIMER)
- ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw_end;
- else
- ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw_end;
- }
+ if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
+ ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
r600_update_occlusion_query_state(ctx, query->b.type, -1);
r600_update_prims_generated_query_state(ctx, query->b.type, -1);
return rquery->ops->begin(rctx, rquery);
}
-static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
- struct r600_query_hw *query)
+void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
struct r600_query_buffer *prev = query->buffer.previous;
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
+ query->buffer.results_end = 0;
+ query->buffer.previous = NULL;
+
if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
/* Obtain a new buffer if the current one can't be mapped without a stall. */
- if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
+ if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(rctx, query);
} else {
- query->ops->prepare_buffer(rctx, query, query->buffer.buf);
+ if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
+ r600_resource_reference(&query->buffer.buf, NULL);
}
}
-
- query->buffer.results_end = 0;
- query->buffer.previous = NULL;
}
-boolean r600_query_hw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+bool r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
return false;
}
- r600_query_hw_reset_buffers(rctx, query);
+ if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
+ r600_query_hw_reset_buffers(rctx, query);
r600_query_hw_emit_start(rctx, query);
+ if (!query->buffer.buf)
+ return false;
- if (query->flags & R600_QUERY_HW_FLAG_TIMER)
- LIST_ADDTAIL(&query->list, &rctx->active_timer_queries);
- else
- LIST_ADDTAIL(&query->list, &rctx->active_nontimer_queries);
+ LIST_ADDTAIL(&query->list, &rctx->active_queries);
return true;
}
-static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
+static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
- rquery->ops->end(rctx, rquery);
+ return rquery->ops->end(rctx, rquery);
}
-void r600_query_hw_end(struct r600_common_context *rctx,
- struct r600_query *rquery)
+bool r600_query_hw_end(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
LIST_DELINIT(&query->list);
+
+ if (!query->buffer.buf)
+ return false;
+
+ return true;
}
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
util_query_clear_result(result, query->b.type);
}
-boolean r600_query_hw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait, union pipe_query_result *result)
+bool r600_query_hw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait, union pipe_query_result *result)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
if (!map)
- return FALSE;
+ return false;
while (results_base != qbuf->results_end) {
query->ops->add_result(rctx, query, map + results_base,
/* Convert the time to expected units. */
if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
rquery->type == PIPE_QUERY_TIMESTAMP) {
- result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
+ result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
}
- return TRUE;
+ return true;
}
static void r600_render_condition(struct pipe_context *ctx,
rctx->set_atom_dirty(rctx, atom, query != NULL);
}
-static void r600_suspend_queries(struct r600_common_context *ctx,
- struct list_head *query_list,
- unsigned *num_cs_dw_queries_suspend)
+void r600_suspend_queries(struct r600_common_context *ctx)
{
struct r600_query_hw *query;
- LIST_FOR_EACH_ENTRY(query, query_list, list) {
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_stop(ctx, query);
}
- assert(*num_cs_dw_queries_suspend == 0);
-}
-
-void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
-{
- r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
- &ctx->num_cs_dw_nontimer_queries_suspend);
-}
-
-void r600_suspend_timer_queries(struct r600_common_context *ctx)
-{
- r600_suspend_queries(ctx, &ctx->active_timer_queries,
- &ctx->num_cs_dw_timer_queries_suspend);
+ assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
return num_dw;
}
-static void r600_resume_queries(struct r600_common_context *ctx,
- struct list_head *query_list,
- unsigned *num_cs_dw_queries_suspend)
+void r600_resume_queries(struct r600_common_context *ctx)
{
struct r600_query_hw *query;
- unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
+ unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
- assert(*num_cs_dw_queries_suspend == 0);
+ assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
- ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
+ ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
- LIST_FOR_EACH_ENTRY(query, query_list, list) {
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_start(ctx, query);
}
}
-void r600_resume_nontimer_queries(struct r600_common_context *ctx)
-{
- r600_resume_queries(ctx, &ctx->active_nontimer_queries,
- &ctx->num_cs_dw_nontimer_queries_suspend);
-}
-
-void r600_resume_timer_queries(struct r600_common_context *ctx)
-{
- r600_resume_queries(ctx, &ctx->active_timer_queries,
- &ctx->num_cs_dw_timer_queries_suspend);
-}
-
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
struct r600_resource *buffer;
uint32_t *results;
- unsigned num_backends = ctx->screen->info.r600_num_backends;
+ unsigned num_backends = ctx->screen->info.num_render_backends;
unsigned i, mask = 0;
/* if backend_map query is supported by the kernel */
- if (ctx->screen->info.r600_backend_map_valid) {
- unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
- unsigned backend_map = ctx->screen->info.r600_backend_map;
+ if (ctx->screen->info.r600_gb_backend_map_valid) {
+ unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
+ unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
unsigned item_width, item_mask;
if (ctx->chip_class >= EVERGREEN) {
item_width = 4;
item_mask = 0x7;
} else {
item_width = 2;
item_mask = 0x3;
}
- while(num_tile_pipes--) {
+ while (num_tile_pipes--) {
i = backend_map & item_mask;
mask |= (1<<i);
backend_map >>= item_width;
}
}
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
+ r600_resource_reference(&buffer, NULL);
if (mask != 0) {
ctx->backend_mask = mask;
return;
}
-#define X(name_, query_type_, type_, result_type_) \
+#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
{ \
.name = name_, \
.query_type = R600_QUERY_##query_type_, \
.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
- .group_id = ~(unsigned)0 \
+ .group_id = group_id_ \
}
+#define X(name_, query_type_, type_, result_type_) \
+ XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
+
+#define XG(group_, name_, query_type_, type_, result_type_) \
+ XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
+
static struct pipe_driver_query_info r600_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
- X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
+ X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
+ X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
+ X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
+ X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
+ X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
+ X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
+ X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
- X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
+ X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
+ X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
+ X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
+
+ /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
+ * which use them as a fallback path to detect the GPU type.
+ *
+ * Note: The names of these queries are significant for GPUPerfStudio
+ * (and possibly their order as well). */
+ XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
+ XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
+ XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
+ XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
+ XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
+
+ /* The following queries must be at the end of the list because their
+ * availability is adjusted dynamically based on the DRM version. */
X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
};
#undef X
+#undef XG
+#undef XFULL
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
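+ /* radeon DRM 2.42+ supports all queries; amdgpu (DRM 3) lacks the
+ * temperature and clock queries, and older radeon also lacks GPU-load. */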
if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
- return Elements(r600_driver_query_list);
+ return ARRAY_SIZE(r600_driver_query_list);
else if (rscreen->info.drm_major == 3)
- return Elements(r600_driver_query_list) - 3;
+ return ARRAY_SIZE(r600_driver_query_list) - 3;
else
- return Elements(r600_driver_query_list) - 4;
+ return ARRAY_SIZE(r600_driver_query_list) - 4;
}
static int r600_get_driver_query_info(struct pipe_screen *screen,
switch (info->query_type) {
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_VRAM_USAGE:
+ case R600_QUERY_MAPPED_VRAM:
info->max_value.u64 = rscreen->info.vram_size;
break;
case R600_QUERY_REQUESTED_GTT:
case R600_QUERY_GTT_USAGE:
+ case R600_QUERY_MAPPED_GTT:
info->max_value.u64 = rscreen->info.gart_size;
break;
case R600_QUERY_GPU_TEMPERATURE:
break;
}
+ if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
+ info->group_id += rscreen->perfcounters->num_groups;
+
return 1;
}
+/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
+ * performance counter groups, so be careful when changing this and related
+ * functions.
+ */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
unsigned index,
struct pipe_driver_query_group_info *info)
{
struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
+ unsigned num_pc_groups = 0;
+
+ if (rscreen->perfcounters)
+ num_pc_groups = rscreen->perfcounters->num_groups;
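+ /* Perfcounter groups are exposed first, then the software query
+ * groups (currently just GPIN). */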
- return r600_get_perfcounter_group_info(rscreen, index, info);
+ if (!info)
+ return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
+
+ if (index < num_pc_groups)
+ return r600_get_perfcounter_group_info(rscreen, index, info);
+
+ index -= num_pc_groups;
+ if (index >= R600_NUM_SW_QUERY_GROUPS)
+ return 0;
+
+ info->name = "GPIN";
+ info->max_active_queries = 5;
+ info->num_queries = 5;
+ return 1;
}
void r600_query_init(struct r600_common_context *rctx)
rctx->b.get_query_result = r600_get_query_result;
rctx->render_cond_atom.emit = r600_emit_query_predication;
- if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
+ if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
rctx->b.render_condition = r600_render_condition;
- LIST_INITHEAD(&rctx->active_nontimer_queries);
- LIST_INITHEAD(&rctx->active_timer_queries);
+ LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)