radeon: add query handler function pointers
[mesa.git] src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource *buf;
	/* Offset of the next free result after current query data */
	unsigned results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer *previous;
};

struct r600_query {
	struct r600_query_ops *ops;

	/* The query buffer and how many results are in it. */
	struct r600_query_buffer buffer;
	/* The type of query */
	unsigned type;
	/* Size of the result in memory for both begin_query and end_query;
	 * it can be one or two numbers, or the size of a structure. */
	unsigned result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned num_cs_dw;
	/* Linked list of queries */
	struct list_head list;
	/* For custom non-GPU queries. */
	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
	/* For transform feedback: which stream the query is for */
	unsigned stream;
};

static void r600_do_destroy_query(struct r600_common_context *, struct r600_query *);
static boolean r600_do_begin_query(struct r600_common_context *, struct r600_query *);
static void r600_do_end_query(struct r600_common_context *, struct r600_query *);
static boolean r600_do_get_query_result(struct r600_common_context *,
					struct r600_query *, boolean wait,
					union pipe_query_result *result);

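/* Default vtable for queries: destroy/begin/end/get_result are dispatched
 * through these function pointers, so query types that need custom handling
 * can provide their own implementations. */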
static struct r600_query_ops legacy_query_ops = {
	.destroy = r600_do_destroy_query,
	.begin = r600_do_begin_query,
	.end = r600_do_end_query,
	.get_result = r600_do_get_query_result,
};

static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

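		/* Layout per result slot: each DB writes a (begin, end) pair
		 * of 64-bit ZPASS counters, 16 bytes per DB. Bit 63 of each
		 * counter is the "result written" flag that
		 * r600_query_read_result tests. */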
		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
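	/* Reserve space for both the begin and end packets up front, so a
	 * query is never split by a command stream flush. */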
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	/* Queries that need begin_query already reserved CS space there;
	 * reserve it here for the rest. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

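	/* The begin packet wrote into the first half of the result slot;
	 * the end packet writes into the second half (for occlusion, each
	 * DB's end counter sits 8 bytes after its begin counter). */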
	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query *query = (struct r600_query*)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;
	query->ops = &legacy_query_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
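		/* 16 bytes per DB: a (begin, end) pair of 64-bit ZPASS counters. */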
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static void r600_do_destroy_query(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

static boolean r600_do_begin_query(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return true;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->end(rctx, rquery);
}

static void r600_do_end_query(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &rquery->fence, 0);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

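/* Read one 64-bit (begin, end) counter pair from a mapped result buffer.
 * If test_status_bit is set, the pair only counts when bit 63 (the GPU's
 * "result written" flag) is set on both snapshots; unwritten slots return 0
 * so they don't corrupt the sum. */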
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static boolean r600_do_get_query_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait, union pipe_query_result *result)
{
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
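	/* The crystal frequency is in kHz (cycles per millisecond), so
	 * (ticks * 1000000) / freq yields nanoseconds, which is what
	 * gallium expects for timestamps. */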
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
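	/* Each results block appears to cost 5 dwords: a 3-dword
	 * SET_PREDICATION packet plus the relocation emitted by
	 * r600_emit_reloc. */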
	atom->num_dw = 0;
	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
		atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get the mask of enabled hardware backends. */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

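		/* backend_map packs one entry per tile pipe naming the
		 * backend it maps to; entries are 4 bits wide on Evergreen
		 * and newer, 2 bits on R600-class chips. */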
		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

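/* Helper for filling out pipe_driver_query_info entries; a group_id of ~0
 * marks the query as belonging to no driver-specific group. */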
#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X

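/* The tail of r600_driver_query_list depends on kernel support: radeon
 * DRM 2.42 added the GPU-load, temperature, and clock queries, while
 * amdgpu (DRM 3.x) exposes GPU-load but not the temperature or clock
 * queries. */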
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}
1138 }