rscreen->info.r600_clock_crystal_freq;
}
+static int r600_get_driver_query_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_info *info)
+{
+ struct pipe_driver_query_info list[] = {
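+ /* name, query type, max value */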
+ {"draw-calls", R600_QUERY_DRAW_CALLS, 0},
+ };
+
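+ /* info == NULL means the caller only wants the number of queries */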
+ if (!info)
+ return Elements(list);
+
+ if (index >= Elements(list))
+ return 0;
+
+ *info = list[index];
+ return 1;
+}
+
struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
{
struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);
rscreen->screen.fence_reference = r600_fence_reference;
rscreen->screen.fence_signalled = r600_fence_signalled;
rscreen->screen.fence_finish = r600_fence_finish;
+ rscreen->screen.get_driver_query_info = r600_get_driver_query_info;
r600_init_screen_resource_functions(&rscreen->screen);
util_format_s3tc_init();
#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 6)
#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 7)
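+/* Driver-specific query types start at PIPE_QUERY_DRIVER_SPECIFIC. */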
+#define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0)
+
struct r600_context;
struct r600_bytecode;
struct r600_shader_key;
unsigned num_cs_dw;
/* linked list of queries */
struct list_head list;
+ /* for custom non-GPU queries */
+ uint64_t begin_result;
+ uint64_t end_result;
};
struct r600_so_target {
unsigned num_cs_dw_nontimer_queries_suspend;
/* If queries have been suspended. */
bool nontimer_queries_suspended;
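+ /* Running count of draw calls, sampled by the draw-calls query. */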
+ unsigned num_draw_calls;
/* Render condition. */
struct pipe_query *current_render_cond;
{
unsigned j, i, num_results, buf_size = 4096;
uint32_t *results;
+
+ /* Non-GPU queries. */
+ switch (type) {
+ case R600_QUERY_DRAW_CALLS:
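+ /* non-GPU queries have no buffer to allocate */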
+ return NULL;
+ }
+
/* Queries are normally read by the CPU after
* being written by the GPU, hence staging is probably a good
* usage pattern.
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
struct r600_context *rctx = (struct r600_context *)ctx;
-
struct r600_query *query;
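+ /* set for non-GPU queries, which are handled in software and need no query buffer */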
+ bool skip_allocation = false;
query = CALLOC_STRUCT(r600_query);
if (query == NULL)
query->result_size = 32;
query->num_cs_dw = 6;
break;
+ /* Non-GPU queries. */
+ case R600_QUERY_DRAW_CALLS:
+ skip_allocation = true;
+ break;
default:
assert(0);
FREE(query);
return NULL;
}
- query->buffer.buf = r600_new_query_buffer(rctx, query_type);
- if (!query->buffer.buf) {
- FREE(query);
- return NULL;
+ if (!skip_allocation) {
+ query->buffer.buf = r600_new_query_buffer(rctx, query_type);
+ if (!query->buffer.buf) {
+ FREE(query);
+ return NULL;
+ }
}
return (struct pipe_query*)query;
}
return;
}
+ /* Non-GPU queries. */
+ switch (rquery->type) {
+ case R600_QUERY_DRAW_CALLS:
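+ /* take a snapshot of the counter at query begin */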
+ rquery->begin_result = rctx->num_draw_calls;
+ return;
+ }
+
/* Discard the old query buffers. */
while (prev) {
struct r600_query_buffer *qbuf = prev;
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
+ /* Non-GPU queries. */
+ switch (rquery->type) {
+ case R600_QUERY_DRAW_CALLS:
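+ /* snapshot the counter again at query end */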
+ rquery->end_result = rctx->num_draw_calls;
+ return;
+ }
+
r600_emit_query_end(rctx, rquery);
if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
unsigned results_base = 0;
char *map;
+ /* Non-GPU queries. */
+ switch (query->type) {
+ case R600_QUERY_DRAW_CALLS:
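+ /* the result is the difference of the two counter snapshots */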
+ result->u64 = query->end_result - query->begin_result;
+ return TRUE;
+ }
+
map = r600_buffer_mmap_sync_with_rings(ctx, qbuf->buf,
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));