gallium/radeon: fix regression in a number of driver queries
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28
29 /* Queries without buffer handling or suspend/resume. */
30 struct r600_query_sw {
31 struct r600_query b;
32
33 uint64_t begin_result;
34 uint64_t end_result;
35 /* Fence for GPU_FINISHED. */
36 struct pipe_fence_handle *fence;
37 };
38
39 static void r600_query_sw_destroy(struct r600_common_context *rctx,
40 struct r600_query *rquery)
41 {
42 struct pipe_screen *screen = rctx->b.screen;
43 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
44
45 screen->fence_reference(screen, &query->fence, NULL);
46 FREE(query);
47 }
48
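/* Map a driver-specific query type to the matching winsys counter,
 * which r600_query_sw_begin/end read through rctx->ws->query_value(). */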
49 static enum radeon_value_id winsys_id_from_type(unsigned type)
50 {
51 switch (type) {
52 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
53 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
54 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
55 case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
56 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
57 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
58 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
59 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
60 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
61 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
62 default: unreachable("query type does not correspond to winsys id");
63 }
64 }
65
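/* Software queries snapshot a counter in begin() and again in end();
 * get_result() then reports end_result - begin_result.  Queries that report
 * an instantaneous value (VRAM/GTT usage, temperature, clocks) leave
 * begin_result at 0, so the subtraction yields the value sampled at end
 * time. */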
66 static boolean r600_query_sw_begin(struct r600_common_context *rctx,
67 struct r600_query *rquery)
68 {
69 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
70
71 switch(query->b.type) {
72 case PIPE_QUERY_TIMESTAMP_DISJOINT:
73 case PIPE_QUERY_GPU_FINISHED:
74 break;
75 case R600_QUERY_DRAW_CALLS:
76 query->begin_result = rctx->num_draw_calls;
77 break;
78 case R600_QUERY_REQUESTED_VRAM:
79 case R600_QUERY_REQUESTED_GTT:
80 case R600_QUERY_VRAM_USAGE:
81 case R600_QUERY_GTT_USAGE:
82 case R600_QUERY_GPU_TEMPERATURE:
83 case R600_QUERY_CURRENT_GPU_SCLK:
84 case R600_QUERY_CURRENT_GPU_MCLK:
85 query->begin_result = 0;
86 break;
87 case R600_QUERY_BUFFER_WAIT_TIME:
88 case R600_QUERY_NUM_CS_FLUSHES:
89 case R600_QUERY_NUM_BYTES_MOVED: {
90 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
91 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
92 break;
93 }
94 case R600_QUERY_GPU_LOAD:
95 query->begin_result = r600_gpu_load_begin(rctx->screen);
96 break;
97 case R600_QUERY_NUM_COMPILATIONS:
98 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
99 break;
100 case R600_QUERY_NUM_SHADERS_CREATED:
101 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
102 break;
103 default:
104 unreachable("r600_query_sw_begin: bad query type");
105 }
106
107 return TRUE;
108 }
109
110 static void r600_query_sw_end(struct r600_common_context *rctx,
111 struct r600_query *rquery)
112 {
113 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
114
115 switch(query->b.type) {
116 case PIPE_QUERY_TIMESTAMP_DISJOINT:
117 break;
118 case PIPE_QUERY_GPU_FINISHED:
119 rctx->b.flush(&rctx->b, &query->fence, 0);
120 break;
121 case R600_QUERY_DRAW_CALLS:
122 query->end_result = rctx->num_draw_calls;
123 break;
124 case R600_QUERY_REQUESTED_VRAM:
125 case R600_QUERY_REQUESTED_GTT:
126 case R600_QUERY_VRAM_USAGE:
127 case R600_QUERY_GTT_USAGE:
128 case R600_QUERY_GPU_TEMPERATURE:
129 case R600_QUERY_CURRENT_GPU_SCLK:
130 case R600_QUERY_CURRENT_GPU_MCLK:
131 case R600_QUERY_BUFFER_WAIT_TIME:
132 case R600_QUERY_NUM_CS_FLUSHES:
133 case R600_QUERY_NUM_BYTES_MOVED: {
134 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
135 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
136 break;
137 }
138 case R600_QUERY_GPU_LOAD:
139 query->end_result = r600_gpu_load_end(rctx->screen,
140 query->begin_result);
141 query->begin_result = 0;
142 break;
143 case R600_QUERY_NUM_COMPILATIONS:
144 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
145 break;
146 case R600_QUERY_NUM_SHADERS_CREATED:
147 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
148 break;
149 default:
150 unreachable("r600_query_sw_end: bad query type");
151 }
152 }
153
154 static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
155 struct r600_query *rquery,
156 boolean wait,
157 union pipe_query_result *result)
158 {
159 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
160
161 switch (query->b.type) {
162 case PIPE_QUERY_TIMESTAMP_DISJOINT:
163 /* Convert from cycles per millisecond to cycles per second (Hz). */
164 result->timestamp_disjoint.frequency =
165 (uint64_t)rctx->screen->info.r600_clock_crystal_freq * 1000;
166 result->timestamp_disjoint.disjoint = FALSE;
167 return TRUE;
168 case PIPE_QUERY_GPU_FINISHED: {
169 struct pipe_screen *screen = rctx->b.screen;
170 result->b = screen->fence_finish(screen, query->fence,
171 wait ? PIPE_TIMEOUT_INFINITE : 0);
172 return result->b;
173 }
174 }
175
176 result->u64 = query->end_result - query->begin_result;
177
178 switch (query->b.type) {
179 case R600_QUERY_BUFFER_WAIT_TIME:
180 case R600_QUERY_GPU_TEMPERATURE:
181 result->u64 /= 1000;
182 break;
183 case R600_QUERY_CURRENT_GPU_SCLK:
184 case R600_QUERY_CURRENT_GPU_MCLK:
185 result->u64 *= 1000000;
186 break;
187 }
188
189 return TRUE;
190 }
191
192 static struct r600_query_ops sw_query_ops = {
193 .destroy = r600_query_sw_destroy,
194 .begin = r600_query_sw_begin,
195 .end = r600_query_sw_end,
196 .get_result = r600_query_sw_get_result
197 };
198
199 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
200 unsigned query_type)
201 {
202 struct r600_query_sw *query;
203
204 query = CALLOC_STRUCT(r600_query_sw);
205 if (!query)
206 return NULL;
207
208 query->b.type = query_type;
209 query->b.ops = &sw_query_ops;
210
211 return (struct pipe_query *)query;
212 }
213
214 void r600_query_hw_destroy(struct r600_common_context *rctx,
215 struct r600_query *rquery)
216 {
217 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
218 struct r600_query_buffer *prev = query->buffer.previous;
219
220 /* Release all query buffers. */
221 while (prev) {
222 struct r600_query_buffer *qbuf = prev;
223 prev = prev->previous;
224 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
225 FREE(qbuf);
226 }
227
228 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
229 FREE(rquery);
230 }
231
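/* Allocate a query buffer of at least 4096 bytes, which leaves room to
 * accumulate many begin/end result pairs before a new buffer has to be
 * chained in.  Predicate-capable queries pre-initialize the buffer
 * (see r600_query_hw_prepare_buffer below). */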
232 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
233 struct r600_query_hw *query)
234 {
235 unsigned buf_size = MAX2(query->result_size, 4096);
236
237 /* Queries are normally read by the CPU after
238  * being written by the GPU, so a staging usage
239  * pattern is a good fit.
240  */
241 struct r600_resource *buf = (struct r600_resource*)
242 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
243 PIPE_USAGE_STAGING, buf_size);
244
245 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
246 query->ops->prepare_buffer(ctx, query, buf);
247
248 return buf;
249 }
250
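/* Occlusion results are laid out as one 16-byte slot per DB holding a
 * (begin, end) pair of 64-bit counters.  DBs that are not present in
 * backend_mask never get written by the GPU, so bit 63 of both counters is
 * pre-set here; r600_query_read_result() treats that bit as the "result
 * written" flag and only counts slots where both bits are set, so unused
 * backends contribute zero. */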
251 static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
252 struct r600_query_hw *query,
253 struct r600_resource *buffer)
254 {
255 /* Callers ensure that the buffer is currently unused by the GPU. */
256 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
257 PIPE_TRANSFER_WRITE |
258 PIPE_TRANSFER_UNSYNCHRONIZED);
259
260 memset(results, 0, buffer->b.b.width0);
261
262 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
263 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
264 unsigned num_results;
265 unsigned i, j;
266
267 /* Set top bits for unused backends. */
268 num_results = buffer->b.b.width0 / (16 * ctx->max_db);
269 for (j = 0; j < num_results; j++) {
270 for (i = 0; i < ctx->max_db; i++) {
271 if (!(ctx->backend_mask & (1<<i))) {
272 results[(i * 4)+1] = 0x80000000;
273 results[(i * 4)+3] = 0x80000000;
274 }
275 }
276 results += 4 * ctx->max_db;
277 }
278 }
279 }
280
281 static struct r600_query_ops query_hw_ops = {
282 .destroy = r600_query_hw_destroy,
283 .begin = r600_query_hw_begin,
284 .end = r600_query_hw_end,
285 .get_result = r600_query_hw_get_result,
286 };
287
288 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
289 struct r600_query_hw *query,
290 struct r600_resource *buffer,
291 uint64_t va);
292 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
293 struct r600_query_hw *query,
294 struct r600_resource *buffer,
295 uint64_t va);
296 static void r600_query_hw_add_result(struct r600_common_context *ctx,
297 struct r600_query_hw *, void *buffer,
298 union pipe_query_result *result);
299 static void r600_query_hw_clear_result(struct r600_query_hw *,
300 union pipe_query_result *);
301
302 static struct r600_query_hw_ops query_hw_default_hw_ops = {
303 .prepare_buffer = r600_query_hw_prepare_buffer,
304 .emit_start = r600_query_hw_do_emit_start,
305 .emit_stop = r600_query_hw_do_emit_stop,
306 .clear_result = r600_query_hw_clear_result,
307 .add_result = r600_query_hw_add_result,
308 };
309
310 boolean r600_query_hw_init(struct r600_common_context *rctx,
311 struct r600_query_hw *query)
312 {
313 query->buffer.buf = r600_new_query_buffer(rctx, query);
314 if (!query->buffer.buf)
315 return FALSE;
316
317 return TRUE;
318 }
319
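/* result_size is the number of bytes one begin/end result pair occupies in
 * the query buffer; num_cs_dw_begin/end is the command-stream space (in
 * dwords) reserved for emitting the corresponding begin/end packets. */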
320 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
321 unsigned query_type,
322 unsigned index)
323 {
324 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
325 if (!query)
326 return NULL;
327
328 query->b.type = query_type;
329 query->b.ops = &query_hw_ops;
330 query->ops = &query_hw_default_hw_ops;
331
332 switch (query_type) {
333 case PIPE_QUERY_OCCLUSION_COUNTER:
334 case PIPE_QUERY_OCCLUSION_PREDICATE:
335 query->result_size = 16 * rctx->max_db;
336 query->num_cs_dw_begin = 6;
337 query->num_cs_dw_end = 6;
338 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
339 break;
340 case PIPE_QUERY_TIME_ELAPSED:
341 query->result_size = 16;
342 query->num_cs_dw_begin = 8;
343 query->num_cs_dw_end = 8;
344 query->flags = R600_QUERY_HW_FLAG_TIMER;
345 break;
346 case PIPE_QUERY_TIMESTAMP:
347 query->result_size = 8;
348 query->num_cs_dw_end = 8;
349 query->flags = R600_QUERY_HW_FLAG_TIMER |
350 R600_QUERY_HW_FLAG_NO_START;
351 break;
352 case PIPE_QUERY_PRIMITIVES_EMITTED:
353 case PIPE_QUERY_PRIMITIVES_GENERATED:
354 case PIPE_QUERY_SO_STATISTICS:
355 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
356 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
357 query->result_size = 32;
358 query->num_cs_dw_begin = 6;
359 query->num_cs_dw_end = 6;
360 query->stream = index;
361 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
362 break;
363 case PIPE_QUERY_PIPELINE_STATISTICS:
364 /* 11 values on EG, 8 on R600. */
365 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
366 query->num_cs_dw_begin = 6;
367 query->num_cs_dw_end = 6;
368 break;
369 default:
370 assert(0);
371 FREE(query);
372 return NULL;
373 }
374
375 if (!r600_query_hw_init(rctx, query)) {
376 FREE(query);
377 return NULL;
378 }
379
380 return (struct pipe_query *)query;
381 }
382
383 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
384 unsigned type, int diff)
385 {
386 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
387 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
388 bool old_enable = rctx->num_occlusion_queries != 0;
389 bool enable;
390
391 rctx->num_occlusion_queries += diff;
392 assert(rctx->num_occlusion_queries >= 0);
393
394 enable = rctx->num_occlusion_queries != 0;
395
396 if (enable != old_enable) {
397 rctx->set_occlusion_query_state(&rctx->b, enable);
398 }
399 }
400 }
401
402 static unsigned event_type_for_stream(struct r600_query_hw *query)
403 {
404 switch (query->stream) {
405 default:
406 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
407 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
408 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
409 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
410 }
411 }
412
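/* Emit the packets that make the GPU write the "begin" half of a result
 * pair at the given VA: ZPASS_DONE for occlusion (one counter pair per DB),
 * SAMPLE_STREAMOUTSTATS* for streamout queries, SAMPLE_PIPELINESTAT for
 * pipeline statistics, and EVENT_WRITE_EOP for a 64-bit timestamp. */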
413 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
414 struct r600_query_hw *query,
415 struct r600_resource *buffer,
416 uint64_t va)
417 {
418 struct radeon_winsys_cs *cs = ctx->gfx.cs;
419
420 switch (query->b.type) {
421 case PIPE_QUERY_OCCLUSION_COUNTER:
422 case PIPE_QUERY_OCCLUSION_PREDICATE:
423 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
424 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
425 radeon_emit(cs, va);
426 radeon_emit(cs, (va >> 32) & 0xFFFF);
427 break;
428 case PIPE_QUERY_PRIMITIVES_EMITTED:
429 case PIPE_QUERY_PRIMITIVES_GENERATED:
430 case PIPE_QUERY_SO_STATISTICS:
431 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
432 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
433 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
434 radeon_emit(cs, va);
435 radeon_emit(cs, (va >> 32) & 0xFFFF);
436 break;
437 case PIPE_QUERY_TIME_ELAPSED:
438 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
439 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
440 radeon_emit(cs, va);
441 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
442 radeon_emit(cs, 0);
443 radeon_emit(cs, 0);
444 break;
445 case PIPE_QUERY_PIPELINE_STATISTICS:
446 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
447 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
448 radeon_emit(cs, va);
449 radeon_emit(cs, (va >> 32) & 0xFFFF);
450 break;
451 default:
452 assert(0);
453 }
454 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
455 RADEON_PRIO_QUERY);
456 }
457
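/* Start a query: reserve CS space for both begin and end (so the query can
 * always be stopped in this CS), chain a new buffer if the current one is
 * full, emit the begin packets, and account for the CS space that stopping
 * this query will need when it is suspended at flush time. */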
458 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
459 struct r600_query_hw *query)
460 {
461 uint64_t va;
462
463 r600_update_occlusion_query_state(ctx, query->b.type, 1);
464 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
465
466 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
467 TRUE);
468
469 /* Get a new query buffer if needed. */
470 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
471 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
472 *qbuf = query->buffer;
473 query->buffer.buf = r600_new_query_buffer(ctx, query);
474 query->buffer.results_end = 0;
475 query->buffer.previous = qbuf;
476 }
477
478 /* emit begin query */
479 va = query->buffer.buf->gpu_address + query->buffer.results_end;
480
481 query->ops->emit_start(ctx, query, query->buffer.buf, va);
482
483 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
484 ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw_end;
485 else
486 ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw_end;
487 }
488
489 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
490 struct r600_query_hw *query,
491 struct r600_resource *buffer,
492 uint64_t va)
493 {
494 struct radeon_winsys_cs *cs = ctx->gfx.cs;
495
496 switch (query->b.type) {
497 case PIPE_QUERY_OCCLUSION_COUNTER:
498 case PIPE_QUERY_OCCLUSION_PREDICATE:
499 va += 8;
500 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
501 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
502 radeon_emit(cs, va);
503 radeon_emit(cs, (va >> 32) & 0xFFFF);
504 break;
505 case PIPE_QUERY_PRIMITIVES_EMITTED:
506 case PIPE_QUERY_PRIMITIVES_GENERATED:
507 case PIPE_QUERY_SO_STATISTICS:
508 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
509 va += query->result_size/2;
510 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
511 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
512 radeon_emit(cs, va);
513 radeon_emit(cs, (va >> 32) & 0xFFFF);
514 break;
515 case PIPE_QUERY_TIME_ELAPSED:
516 va += query->result_size/2;
517 /* fall through */
518 case PIPE_QUERY_TIMESTAMP:
519 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
520 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
521 radeon_emit(cs, va);
522 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
523 radeon_emit(cs, 0);
524 radeon_emit(cs, 0);
525 break;
526 case PIPE_QUERY_PIPELINE_STATISTICS:
527 va += query->result_size/2;
528 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
529 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
530 radeon_emit(cs, va);
531 radeon_emit(cs, (va >> 32) & 0xFFFF);
532 break;
533 default:
534 assert(0);
535 }
536 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
537 RADEON_PRIO_QUERY);
538 }
539
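/* Stop a query: the end packets write into the second half of the result
 * pair (va + result_size/2, or va + 8 per DB for occlusion counters), and
 * results_end advances so the next begin uses a fresh slot. */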
540 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
541 struct r600_query_hw *query)
542 {
543 uint64_t va;
544
545 /* Queries that need a begin already reserved CS space for this in begin_query. */
546 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
547 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
548 }
549
550 /* emit end query */
551 va = query->buffer.buf->gpu_address + query->buffer.results_end;
552
553 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
554
555 query->buffer.results_end += query->result_size;
556
557 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
558 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
559 ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw_end;
560 else
561 ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw_end;
562 }
563
564 r600_update_occlusion_query_state(ctx, query->b.type, -1);
565 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
566 }
567
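/* Emit SET_PREDICATION packets for conditional rendering.  One packet is
 * emitted per accumulated result pair in every chained query buffer; the
 * CONTINUE bit is set on all packets after the first so the hardware
 * combines the partial results. */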
568 static void r600_emit_query_predication(struct r600_common_context *ctx,
569 struct r600_atom *atom)
570 {
571 struct radeon_winsys_cs *cs = ctx->gfx.cs;
572 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
573 struct r600_query_buffer *qbuf;
574 uint32_t op;
575 bool flag_wait;
576
577 if (!query)
578 return;
579
580 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
581 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
582
583 switch (query->b.type) {
584 case PIPE_QUERY_OCCLUSION_COUNTER:
585 case PIPE_QUERY_OCCLUSION_PREDICATE:
586 op = PRED_OP(PREDICATION_OP_ZPASS);
587 break;
588 case PIPE_QUERY_PRIMITIVES_EMITTED:
589 case PIPE_QUERY_PRIMITIVES_GENERATED:
590 case PIPE_QUERY_SO_STATISTICS:
591 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
592 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
593 break;
594 default:
595 assert(0);
596 return;
597 }
598
599 /* If true, invert the condition; see GL_ARB_conditional_render_inverted. */
600 if (ctx->render_cond_invert)
601 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
602 else
603 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
604
605 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
606
607 /* emit predicate packets for all data blocks */
608 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
609 unsigned results_base = 0;
610 uint64_t va = qbuf->buf->gpu_address;
611
612 while (results_base < qbuf->results_end) {
613 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
614 radeon_emit(cs, va + results_base);
615 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
616 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
617 RADEON_PRIO_QUERY);
618 results_base += query->result_size;
619
620 /* set CONTINUE bit for all packets except the first */
621 op |= PREDICATION_CONTINUE;
622 }
623 }
624 }
625
626 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
627 {
628 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
629
630 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
631 query_type == PIPE_QUERY_GPU_FINISHED ||
632 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
633 return r600_query_sw_create(ctx, query_type);
634
635 return r600_query_hw_create(rctx, query_type, index);
636 }
637
638 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
639 {
640 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
641 struct r600_query *rquery = (struct r600_query *)query;
642
643 rquery->ops->destroy(rctx, rquery);
644 }
645
646 static boolean r600_begin_query(struct pipe_context *ctx,
647 struct pipe_query *query)
648 {
649 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
650 struct r600_query *rquery = (struct r600_query *)query;
651
652 return rquery->ops->begin(rctx, rquery);
653 }
654
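/* Release the old chained buffers and reuse the current one.  For
 * predicate-capable queries, allocate a fresh buffer instead of stalling if
 * the GPU still references the current one; otherwise just re-prepare it. */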
655 static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
656 struct r600_query_hw *query)
657 {
658 struct r600_query_buffer *prev = query->buffer.previous;
659
660 /* Discard the old query buffers. */
661 while (prev) {
662 struct r600_query_buffer *qbuf = prev;
663 prev = prev->previous;
664 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
665 FREE(qbuf);
666 }
667
668 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
669 /* Obtain a new buffer if the current one can't be mapped without a stall. */
670 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
671 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
672 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
673 query->buffer.buf = r600_new_query_buffer(rctx, query);
674 } else {
675 query->ops->prepare_buffer(rctx, query, query->buffer.buf);
676 }
677 }
678
679 query->buffer.results_end = 0;
680 query->buffer.previous = NULL;
681 }
682
683 boolean r600_query_hw_begin(struct r600_common_context *rctx,
684 struct r600_query *rquery)
685 {
686 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
687
688 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
689 assert(0);
690 return false;
691 }
692
693 r600_query_hw_reset_buffers(rctx, query);
694
695 r600_query_hw_emit_start(rctx, query);
696
697 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
698 LIST_ADDTAIL(&query->list, &rctx->active_timer_queries);
699 else
700 LIST_ADDTAIL(&query->list, &rctx->active_nontimer_queries);
701 return true;
702 }
703
704 static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
705 {
706 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
707 struct r600_query *rquery = (struct r600_query *)query;
708
709 rquery->ops->end(rctx, rquery);
710 }
711
712 void r600_query_hw_end(struct r600_common_context *rctx,
713 struct r600_query *rquery)
714 {
715 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
716
717 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
718 r600_query_hw_reset_buffers(rctx, query);
719
720 r600_query_hw_emit_stop(rctx, query);
721
722 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
723 LIST_DELINIT(&query->list);
724 }
725
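/* Read one 64-bit (start, end) pair from a mapped result slot and return
 * end - start.  When test_status_bit is set, bit 63 of both values is
 * expected to be set by the GPU once the result has landed (or pre-set for
 * disabled backends), and 0 is returned if either bit is still clear. */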
726 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
727 bool test_status_bit)
728 {
729 uint32_t *current_result = (uint32_t*)map;
730 uint64_t start, end;
731
732 start = (uint64_t)current_result[start_index] |
733 (uint64_t)current_result[start_index+1] << 32;
734 end = (uint64_t)current_result[end_index] |
735 (uint64_t)current_result[end_index+1] << 32;
736
737 if (!test_status_bit ||
738 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
739 return end - start;
740 }
741 return 0;
742 }
743
744 static void r600_query_hw_add_result(struct r600_common_context *ctx,
745 struct r600_query_hw *query,
746 void *buffer,
747 union pipe_query_result *result)
748 {
749 switch (query->b.type) {
750 case PIPE_QUERY_OCCLUSION_COUNTER: {
751 unsigned results_base = 0;
752 while (results_base != query->result_size) {
753 result->u64 +=
754 r600_query_read_result(buffer + results_base, 0, 2, true);
755 results_base += 16;
756 }
757 break;
758 }
759 case PIPE_QUERY_OCCLUSION_PREDICATE: {
760 unsigned results_base = 0;
761 while (results_base != query->result_size) {
762 result->b = result->b ||
763 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
764 results_base += 16;
765 }
766 break;
767 }
768 case PIPE_QUERY_TIME_ELAPSED:
769 result->u64 += r600_query_read_result(buffer, 0, 2, false);
770 break;
771 case PIPE_QUERY_TIMESTAMP:
772 {
773 uint32_t *current_result = (uint32_t*)buffer;
774 result->u64 = (uint64_t)current_result[0] |
775 (uint64_t)current_result[1] << 32;
776 break;
777 }
778 case PIPE_QUERY_PRIMITIVES_EMITTED:
779 /* SAMPLE_STREAMOUTSTATS stores this structure:
780 * {
781 * u64 NumPrimitivesWritten;
782 * u64 PrimitiveStorageNeeded;
783 * }
784 * We only need NumPrimitivesWritten here. */
785 result->u64 += r600_query_read_result(buffer, 2, 6, true);
786 break;
787 case PIPE_QUERY_PRIMITIVES_GENERATED:
788 /* Here we read PrimitiveStorageNeeded. */
789 result->u64 += r600_query_read_result(buffer, 0, 4, true);
790 break;
791 case PIPE_QUERY_SO_STATISTICS:
792 result->so_statistics.num_primitives_written +=
793 r600_query_read_result(buffer, 2, 6, true);
794 result->so_statistics.primitives_storage_needed +=
795 r600_query_read_result(buffer, 0, 4, true);
796 break;
797 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
798 result->b = result->b ||
799 r600_query_read_result(buffer, 2, 6, true) !=
800 r600_query_read_result(buffer, 0, 4, true);
801 break;
802 case PIPE_QUERY_PIPELINE_STATISTICS:
803 if (ctx->chip_class >= EVERGREEN) {
804 result->pipeline_statistics.ps_invocations +=
805 r600_query_read_result(buffer, 0, 22, false);
806 result->pipeline_statistics.c_primitives +=
807 r600_query_read_result(buffer, 2, 24, false);
808 result->pipeline_statistics.c_invocations +=
809 r600_query_read_result(buffer, 4, 26, false);
810 result->pipeline_statistics.vs_invocations +=
811 r600_query_read_result(buffer, 6, 28, false);
812 result->pipeline_statistics.gs_invocations +=
813 r600_query_read_result(buffer, 8, 30, false);
814 result->pipeline_statistics.gs_primitives +=
815 r600_query_read_result(buffer, 10, 32, false);
816 result->pipeline_statistics.ia_primitives +=
817 r600_query_read_result(buffer, 12, 34, false);
818 result->pipeline_statistics.ia_vertices +=
819 r600_query_read_result(buffer, 14, 36, false);
820 result->pipeline_statistics.hs_invocations +=
821 r600_query_read_result(buffer, 16, 38, false);
822 result->pipeline_statistics.ds_invocations +=
823 r600_query_read_result(buffer, 18, 40, false);
824 result->pipeline_statistics.cs_invocations +=
825 r600_query_read_result(buffer, 20, 42, false);
826 } else {
827 result->pipeline_statistics.ps_invocations +=
828 r600_query_read_result(buffer, 0, 16, false);
829 result->pipeline_statistics.c_primitives +=
830 r600_query_read_result(buffer, 2, 18, false);
831 result->pipeline_statistics.c_invocations +=
832 r600_query_read_result(buffer, 4, 20, false);
833 result->pipeline_statistics.vs_invocations +=
834 r600_query_read_result(buffer, 6, 22, false);
835 result->pipeline_statistics.gs_invocations +=
836 r600_query_read_result(buffer, 8, 24, false);
837 result->pipeline_statistics.gs_primitives +=
838 r600_query_read_result(buffer, 10, 26, false);
839 result->pipeline_statistics.ia_primitives +=
840 r600_query_read_result(buffer, 12, 28, false);
841 result->pipeline_statistics.ia_vertices +=
842 r600_query_read_result(buffer, 14, 30, false);
843 }
844 #if 0 /* for testing */
845 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
846 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
847 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
848 result->pipeline_statistics.ia_vertices,
849 result->pipeline_statistics.ia_primitives,
850 result->pipeline_statistics.vs_invocations,
851 result->pipeline_statistics.hs_invocations,
852 result->pipeline_statistics.ds_invocations,
853 result->pipeline_statistics.gs_invocations,
854 result->pipeline_statistics.gs_primitives,
855 result->pipeline_statistics.c_invocations,
856 result->pipeline_statistics.c_primitives,
857 result->pipeline_statistics.ps_invocations,
858 result->pipeline_statistics.cs_invocations);
859 #endif
860 break;
861 default:
862 assert(0);
863 }
864 }
865
866 static boolean r600_get_query_result(struct pipe_context *ctx,
867 struct pipe_query *query, boolean wait,
868 union pipe_query_result *result)
869 {
870 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
871 struct r600_query *rquery = (struct r600_query *)query;
872
873 return rquery->ops->get_result(rctx, rquery, wait, result);
874 }
875
876 static void r600_query_hw_clear_result(struct r600_query_hw *query,
877 union pipe_query_result *result)
878 {
879 util_query_clear_result(result, query->b.type);
880 }
881
882 boolean r600_query_hw_get_result(struct r600_common_context *rctx,
883 struct r600_query *rquery,
884 boolean wait, union pipe_query_result *result)
885 {
886 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
887 struct r600_query_buffer *qbuf;
888
889 query->ops->clear_result(query, result);
890
891 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
892 unsigned results_base = 0;
893 void *map;
894
895 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
896 PIPE_TRANSFER_READ |
897 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
898 if (!map)
899 return FALSE;
900
901 while (results_base != qbuf->results_end) {
902 query->ops->add_result(rctx, query, map + results_base,
903 result);
904 results_base += query->result_size;
905 }
906 }
907
908 /* Convert the time to expected units. */
909 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
910 rquery->type == PIPE_QUERY_TIMESTAMP) {
911 result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
912 }
913 return TRUE;
914 }
915
916 static void r600_render_condition(struct pipe_context *ctx,
917 struct pipe_query *query,
918 boolean condition,
919 uint mode)
920 {
921 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
922 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
923 struct r600_query_buffer *qbuf;
924 struct r600_atom *atom = &rctx->render_cond_atom;
925
926 rctx->render_cond = query;
927 rctx->render_cond_invert = condition;
928 rctx->render_cond_mode = mode;
929
930 /* Compute the size of SET_PREDICATION packets. */
931 atom->num_dw = 0;
932 if (query) {
933 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
934 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
935 }
936
937 rctx->set_atom_dirty(rctx, atom, query != NULL);
938 }
939
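/* Suspend/resume: before a CS flush, every active query gets its end
 * packets emitted (suspend); after the flush, begin packets are re-emitted
 * into the new CS (resume), and add_result() later sums the partial results
 * across the chained buffers. */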
940 static void r600_suspend_queries(struct r600_common_context *ctx,
941 struct list_head *query_list,
942 unsigned *num_cs_dw_queries_suspend)
943 {
944 struct r600_query_hw *query;
945
946 LIST_FOR_EACH_ENTRY(query, query_list, list) {
947 r600_query_hw_emit_stop(ctx, query);
948 }
949 assert(*num_cs_dw_queries_suspend == 0);
950 }
951
952 void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
953 {
954 r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
955 &ctx->num_cs_dw_nontimer_queries_suspend);
956 }
957
958 void r600_suspend_timer_queries(struct r600_common_context *ctx)
959 {
960 r600_suspend_queries(ctx, &ctx->active_timer_queries,
961 &ctx->num_cs_dw_timer_queries_suspend);
962 }
963
964 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
965 struct list_head *query_list)
966 {
967 struct r600_query_hw *query;
968 unsigned num_dw = 0;
969
970 LIST_FOR_EACH_ENTRY(query, query_list, list) {
971 /* begin + end */
972 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
973
974 /* Workaround for the fact that
975 * num_cs_dw_nontimer_queries_suspend is incremented for every
976 * resumed query, which raises the bar in need_cs_space for
977 * queries about to be resumed.
978 */
979 num_dw += query->num_cs_dw_end;
980 }
981 /* primitives generated query */
982 num_dw += ctx->streamout.enable_atom.num_dw;
983 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
984 num_dw += 13;
985
986 return num_dw;
987 }
988
989 static void r600_resume_queries(struct r600_common_context *ctx,
990 struct list_head *query_list,
991 unsigned *num_cs_dw_queries_suspend)
992 {
993 struct r600_query_hw *query;
994 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
995
996 assert(*num_cs_dw_queries_suspend == 0);
997
998 /* Check CS space here. Resuming must not be interrupted by flushes. */
999 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
1000
1001 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1002 r600_query_hw_emit_start(ctx, query);
1003 }
1004 }
1005
1006 void r600_resume_nontimer_queries(struct r600_common_context *ctx)
1007 {
1008 r600_resume_queries(ctx, &ctx->active_nontimer_queries,
1009 &ctx->num_cs_dw_nontimer_queries_suspend);
1010 }
1011
1012 void r600_resume_timer_queries(struct r600_common_context *ctx)
1013 {
1014 r600_resume_queries(ctx, &ctx->active_timer_queries,
1015 &ctx->num_cs_dw_timer_queries_suspend);
1016 }
1017
1018 /* Determine the backend (DB) mask. */
1019 void r600_query_init_backend_mask(struct r600_common_context *ctx)
1020 {
1021 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1022 struct r600_resource *buffer;
1023 uint32_t *results;
1024 unsigned num_backends = ctx->screen->info.r600_num_backends;
1025 unsigned i, mask = 0;
1026
1027 /* if backend_map query is supported by the kernel */
1028 if (ctx->screen->info.r600_backend_map_valid) {
1029 unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
1030 unsigned backend_map = ctx->screen->info.r600_backend_map;
1031 unsigned item_width, item_mask;
1032
1033 if (ctx->chip_class >= EVERGREEN) {
1034 item_width = 4;
1035 item_mask = 0x7;
1036 } else {
1037 item_width = 2;
1038 item_mask = 0x3;
1039 }
1040
1041 while(num_tile_pipes--) {
1042 i = backend_map & item_mask;
1043 mask |= (1<<i);
1044 backend_map >>= item_width;
1045 }
1046 if (mask != 0) {
1047 ctx->backend_mask = mask;
1048 return;
1049 }
1050 }
1051
1052 /* Otherwise, fall back to a detection path for older kernels. */
1053
1054 /* create buffer for event data */
1055 buffer = (struct r600_resource*)
1056 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
1057 PIPE_USAGE_STAGING, ctx->max_db*16);
1058 if (!buffer)
1059 goto err;
1060
1061 /* initialize buffer with zeroes */
1062 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1063 if (results) {
1064 memset(results, 0, ctx->max_db * 4 * 4);
1065
1066 /* emit EVENT_WRITE for ZPASS_DONE */
1067 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1068 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1069 radeon_emit(cs, buffer->gpu_address);
1070 radeon_emit(cs, buffer->gpu_address >> 32);
1071
1072 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1073 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1074
1075 /* analyze results */
1076 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1077 if (results) {
1078 for(i = 0; i < ctx->max_db; i++) {
1079 /* At least the highest bit will be set if the backend is used. */
1080 if (results[i*4 + 1])
1081 mask |= (1<<i);
1082 }
1083 }
1084 }
1085
1086 pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
1087
1088 if (mask != 0) {
1089 ctx->backend_mask = mask;
1090 return;
1091 }
1092
1093 err:
1094 /* Fall back to the old method: set the lowest num_backends bits to 1. */
1095 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1096 return;
1097 }
1098
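/* Shorthand for a pipe_driver_query_info entry; group_id is set to ~0,
 * i.e. these queries are not associated with a driver query group. */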
1099 #define X(name_, query_type_, type_, result_type_) \
1100 { \
1101 .name = name_, \
1102 .query_type = R600_QUERY_##query_type_, \
1103 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1104 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1105 .group_id = ~(unsigned)0 \
1106 }
1107
1108 static struct pipe_driver_query_info r600_driver_query_list[] = {
1109 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1110 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1111 X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
1112 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1113 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1114 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1115 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
1116 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1117 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1118 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1119 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1120 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1121 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1122 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1123 };
1124
1125 #undef X
1126
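/* The tail of r600_driver_query_list depends on kernel support: older
 * radeon kernels (DRM < 2.42) expose none of the last four entries, and the
 * amdgpu kernel driver (DRM major 3) lacks the temperature/clock queries,
 * so the list is truncated accordingly. */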
1127 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1128 {
1129 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1130 return Elements(r600_driver_query_list);
1131 else if (rscreen->info.drm_major == 3)
1132 return Elements(r600_driver_query_list) - 3;
1133 else
1134 return Elements(r600_driver_query_list) - 4;
1135 }
1136
1137 static int r600_get_driver_query_info(struct pipe_screen *screen,
1138 unsigned index,
1139 struct pipe_driver_query_info *info)
1140 {
1141 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1142 unsigned num_queries = r600_get_num_queries(rscreen);
1143
1144 if (!info) {
1145 unsigned num_perfcounters =
1146 r600_get_perfcounter_info(rscreen, 0, NULL);
1147
1148 return num_queries + num_perfcounters;
1149 }
1150
1151 if (index >= num_queries)
1152 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1153
1154 *info = r600_driver_query_list[index];
1155
1156 switch (info->query_type) {
1157 case R600_QUERY_REQUESTED_VRAM:
1158 case R600_QUERY_VRAM_USAGE:
1159 info->max_value.u64 = rscreen->info.vram_size;
1160 break;
1161 case R600_QUERY_REQUESTED_GTT:
1162 case R600_QUERY_GTT_USAGE:
1163 info->max_value.u64 = rscreen->info.gart_size;
1164 break;
1165 case R600_QUERY_GPU_TEMPERATURE:
1166 info->max_value.u64 = 125;
1167 break;
1168 }
1169
1170 return 1;
1171 }
1172
1173 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1174 unsigned index,
1175 struct pipe_driver_query_group_info *info)
1176 {
1177 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1178
1179 return r600_get_perfcounter_group_info(rscreen, index, info);
1180 }
1181
1182 void r600_query_init(struct r600_common_context *rctx)
1183 {
1184 rctx->b.create_query = r600_create_query;
1185 rctx->b.create_batch_query = r600_create_batch_query;
1186 rctx->b.destroy_query = r600_destroy_query;
1187 rctx->b.begin_query = r600_begin_query;
1188 rctx->b.end_query = r600_end_query;
1189 rctx->b.get_query_result = r600_get_query_result;
1190 rctx->render_cond_atom.emit = r600_emit_query_predication;
1191
1192 if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
1193 rctx->b.render_condition = r600_render_condition;
1194
1195 LIST_INITHEAD(&rctx->active_nontimer_queries);
1196 LIST_INITHEAD(&rctx->active_timer_queries);
1197 }
1198
1199 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1200 {
1201 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1202 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1203 }