gallium/radeon: rename the num-ctx-flushes query to num-GFX-IBs
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_text.h"
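
/* Layout of one hardware query result slot: byte offsets of the begin/end
 * values and of the fence dword, plus the stride and count of (begin, end)
 * pairs for queries that store one pair per DB. Filled in by
 * r600_get_hw_query_params below. */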
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->begin_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->end_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
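
/* Illustrative sketch (not driver code, kept under #if 0 like the testing
 * code elsewhere in this file): how a driver-specific software query such
 * as num-GFX-IBs flows through the gallium query entry points. */
#if 0
static uint64_t example_count_gfx_ibs(struct pipe_context *ctx)
{
	union pipe_query_result result = {};
	struct pipe_query *q = ctx->create_query(ctx, R600_QUERY_NUM_GFX_IBS, 0);

	ctx->begin_query(ctx, q);
	/* ... submit some work ... */
	ctx->end_query(ctx, q);
	ctx->get_query_result(ctx, q, true, &result); /* wait = true */
	ctx->destroy_query(ctx, q);
	return result.u64; /* GFX IBs submitted between begin and end */
}
#endif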

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(ctx, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_context *rctx,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
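
/* Result slot layouts implied by the sizes above, one slot per begin/end:
 *  occlusion counter/predicate: max_db pairs of {begin, end} u64 ZPASS
 *      counts (16 bytes per DB), then a fence dword plus alignment;
 *  time elapsed: begin TS (8) + end TS (8) + fence (8);
 *  timestamp: TS (8) + fence (8);
 *  streamout statistics: begin and end {NumPrimitivesWritten,
 *      PrimitiveStorageNeeded} blocks of 16 bytes each;
 *  pipeline statistics: begin and end blocks of 11 (EG+) or 8 (R600)
 *      u64 counters, then a fence dword. */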

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + ctx->max_db * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries that have a begin already reserved CS space for the end
	 * packet in begin_query; only queries without a begin check here. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx, query);
	} else {
		if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = rctx->max_db * 16;
		params->pair_stride = 16;
		params->pair_count = rctx->max_db;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
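
/* Counters are stored as {lo, hi} dword pairs. When test_status_bit is set,
 * the top bit of the hi dword doubles as an availability flag: the GPU sets
 * it once a value has landed, and r600_query_hw_prepare_buffer presets it
 * for disabled backends so they never hold a result back. A slot
 * contributes (end - start) only when both flags are set, and 0 otherwise. */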
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}
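
/* Worked example of the conversion above, assuming a 27000 kHz reference
 * clock (clock_crystal_freq is in kHz, as the TIMESTAMP_DISJOINT handling
 * in r600_query_sw_get_result implies): 54000 ticks ->
 * (1000000 * 54000) / 27000 = 2000000 ns, i.e. 2 ms. */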

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
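
/* For instance (derived from the config logic in
 * r600_query_hw_get_result_resource below): an occlusion predicate read
 * back as PIPE_QUERY_TYPE_U64 with index >= 0 uses config = 8 | 64, and
 * bits 1/2 are OR'ed in per buffer when the query spans several result
 * buffers. */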
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
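
/* Suspend/resume bracket a gfx CS flush: every active query is stopped in
 * the old command stream and started again in the new one, so results keep
 * accumulating correctly across flushes. */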

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
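
/* For example, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to
 * { .name = "draw-calls", .query_type = R600_QUERY_DRAW_CALLS,
 *   .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *   .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *   .group_id = ~(unsigned)0 }. */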
static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}
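
/* Radeon kernels with DRM 2.42+ expose the whole list; amdgpu (DRM major 3)
 * lacks the last three entries (temperature and clocks), and older radeon
 * kernels additionally lack GPU-load. */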

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}