radeonsi/gfx9: query changes - EVENT_WRITE and SET_PREDICATION
src/gallium/drivers/radeon/r600_query.c (mesa.git)
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"

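/* Layout of one result slot in a query buffer, as consumed by the
 * result-gathering compute shader: offsets of the begin/end values and of
 * the availability fence and, for queries that record one begin/end pair
 * per render backend (occlusion), the stride and count of those pairs.
 */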
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->begin_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->end_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_CS_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}


static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

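/* Allocate one result buffer. When a buffer fills up,
 * r600_query_hw_emit_start chains a fresh one in front of it via
 * r600_query_buffer::previous.
 */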
static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
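		/* Each RB slot holds a pair of 64-bit values whose top bits
		 * act as "result written" flags (see r600_query_read_result);
		 * pre-setting them makes disabled backends always read as
		 * available.
		 */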
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

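/* Track how many occlusion queries are active so the DB state can be
 * toggled. Counter queries need exact ("perfect") sample counts; the
 * predicate only needs a zero/non-zero answer.
 */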
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

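/* Streamout statistics are sampled per stream; each stream has its own
 * SAMPLE_STREAMOUTSTATS* event type.
 */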
static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries that have a begin already reserved CS space in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

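/* Emit SET_PREDICATION packets for conditional rendering. On GFX9 the
 * packet is one dword longer: it carries the operation dword followed by
 * the full 64-bit address, whereas older chips pack the high address bits
 * into the operation dword.
 */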
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (ctx->chip_class >= GFX9) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
				radeon_emit(cs, op);
				radeon_emit(cs, va);
				radeon_emit(cs, va >> 32);
			} else {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, op | ((va >> 32) & 0xFF));
			}
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

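/* Read one 64-bit counter from a mapped result buffer as the difference
 * end - start. If test_status_bit is set, the value only counts when the
 * top bit of both halves is set, i.e. the GPU has actually written them.
 */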
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = ctx->screen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
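/* For example (hypothetical values), accumulating a 64-bit occlusion
 * counter from a middle buffer of a chain would use
 * CONST[0].w = 1 (read previous) | 2 (write for chaining) | 64 (64 bits).
 */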
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

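/* Write a query result into a GPU buffer by launching the result shader
 * once per buffer in the query's chain. Intermediate sums travel through a
 * small scratch suballocation; the last grid launched writes to the
 * caller's resource.
 */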
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

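/* Suspend/resume are used around CS flushes: stop all active queries so
 * their partial results land in the current CS, then re-emit the begins
 * in the new one.
 */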
1615 void r600_suspend_queries(struct r600_common_context *ctx)
1616 {
1617 struct r600_query_hw *query;
1618
1619 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1620 r600_query_hw_emit_stop(ctx, query);
1621 }
1622 assert(ctx->num_cs_dw_queries_suspend == 0);
1623 }
1624
1625 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1626 struct list_head *query_list)
1627 {
1628 struct r600_query_hw *query;
1629 unsigned num_dw = 0;
1630
1631 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1632 /* begin + end */
1633 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1634
1635 /* Workaround for the fact that
1636 * num_cs_dw_nontimer_queries_suspend is incremented for every
1637 * resumed query, which raises the bar in need_cs_space for
1638 * queries about to be resumed.
1639 */
1640 num_dw += query->num_cs_dw_end;
1641 }
1642 /* primitives generated query */
1643 num_dw += ctx->streamout.enable_atom.num_dw;
1644 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1645 num_dw += 13;
1646
1647 return num_dw;
1648 }
1649
1650 void r600_resume_queries(struct r600_common_context *ctx)
1651 {
1652 struct r600_query_hw *query;
1653 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1654
1655 assert(ctx->num_cs_dw_queries_suspend == 0);
1656
1657 /* Check CS space here. Resuming must not be interrupted by flushes. */
1658 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1659
1660 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1661 r600_query_hw_emit_start(ctx, query);
1662 }
1663 }
1664
1665 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1666 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1667 {
1668 struct r600_common_context *ctx =
1669 (struct r600_common_context*)rscreen->aux_context;
1670 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1671 struct r600_resource *buffer;
1672 uint32_t *results;
1673 unsigned i, mask = 0;
1674 unsigned max_rbs = ctx->screen->info.num_render_backends;
1675
1676 assert(rscreen->chip_class <= CAYMAN);
1677
1678 /* if backend_map query is supported by the kernel */
1679 if (rscreen->info.r600_gb_backend_map_valid) {
1680 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1681 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1682 unsigned item_width, item_mask;
1683
1684 if (ctx->chip_class >= EVERGREEN) {
1685 item_width = 4;
1686 item_mask = 0x7;
1687 } else {
1688 item_width = 2;
1689 item_mask = 0x3;
1690 }
1691
1692 while (num_tile_pipes--) {
1693 i = backend_map & item_mask;
1694 mask |= (1<<i);
1695 backend_map >>= item_width;
1696 }
1697 if (mask != 0) {
1698 rscreen->info.enabled_rb_mask = mask;
1699 return;
1700 }
1701 }

	/* otherwise, fall back to an in-band query for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE; it writes one 64-bit
		 * counter per RB at a 16-byte stride */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit of the upper
				 * dword will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask)
		rscreen->info.enabled_rb_mask = mask;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
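/* For example,
 *	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE)
 * expands to
 *	{ .name = "draw-calls",
 *	  .query_type = R600_QUERY_DRAW_CALLS,
 *	  .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *	  .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *	  .group_id = ~(unsigned)0 }
 */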

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("num-shader-cache-hits",	NUM_SHADER_CACHE_HITS,	UINT64, CUMULATIVE),
	X("draw-calls",			DRAW_CALLS,		UINT64, AVERAGE),
	X("spill-draw-calls",		SPILL_DRAW_CALLS,	UINT64, AVERAGE),
	X("compute-calls",		COMPUTE_CALLS,		UINT64, AVERAGE),
	X("spill-compute-calls",	SPILL_COMPUTE_CALLS,	UINT64, AVERAGE),
	X("dma-calls",			DMA_CALLS,		UINT64, AVERAGE),
	X("cp-dma-calls",		CP_DMA_CALLS,		UINT64, AVERAGE),
	X("num-vs-flushes",		NUM_VS_FLUSHES,		UINT64, AVERAGE),
	X("num-ps-flushes",		NUM_PS_FLUSHES,		UINT64, AVERAGE),
	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, AVERAGE),
	X("num-fb-cache-flushes",	NUM_FB_CACHE_FLUSHES,	UINT64, AVERAGE),
	X("num-L2-invalidates",		NUM_L2_INVALIDATES,	UINT64, AVERAGE),
	X("num-L2-writebacks",		NUM_L2_WRITEBACKS,	UINT64, AVERAGE),
	X("CS-thread-busy",		CS_THREAD_BUSY,		UINT64, AVERAGE),
	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
	X("mapped-VRAM",		MAPPED_VRAM,		BYTES, AVERAGE),
	X("mapped-GTT",			MAPPED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers",		NUM_MAPPED_BUFFERS,	UINT64, AVERAGE),
	X("num-GFX-IBs",		NUM_GFX_IBS,		UINT64, AVERAGE),
	X("num-SDMA-IBs",		NUM_SDMA_IBS,		UINT64, AVERAGE),
	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("num-evictions",		NUM_EVICTIONS,		UINT64, CUMULATIVE),
	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
	X("VRAM-vis-usage",		VRAM_VIS_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio",	BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000",		GPIN_ASIC_ID,		UINT, AVERAGE),
	XG(GPIN, "GPIN_001",		GPIN_NUM_SIMD,		UINT, AVERAGE),
	XG(GPIN, "GPIN_002",		GPIN_NUM_RB,		UINT, AVERAGE),
	XG(GPIN, "GPIN_003",		GPIN_NUM_SPI,		UINT, AVERAGE),
	XG(GPIN, "GPIN_004",		GPIN_NUM_SE,		UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
	X("GPU-shaders-busy",		GPU_SHADERS_BUSY,	UINT64, AVERAGE),
	X("GPU-ta-busy",		GPU_TA_BUSY,		UINT64, AVERAGE),
	X("GPU-gds-busy",		GPU_GDS_BUSY,		UINT64, AVERAGE),
	X("GPU-vgt-busy",		GPU_VGT_BUSY,		UINT64, AVERAGE),
	X("GPU-ia-busy",		GPU_IA_BUSY,		UINT64, AVERAGE),
	X("GPU-sx-busy",		GPU_SX_BUSY,		UINT64, AVERAGE),
	X("GPU-wd-busy",		GPU_WD_BUSY,		UINT64, AVERAGE),
	X("GPU-bci-busy",		GPU_BCI_BUSY,		UINT64, AVERAGE),
	X("GPU-sc-busy",		GPU_SC_BUSY,		UINT64, AVERAGE),
	X("GPU-pa-busy",		GPU_PA_BUSY,		UINT64, AVERAGE),
	X("GPU-db-busy",		GPU_DB_BUSY,		UINT64, AVERAGE),
	X("GPU-cp-busy",		GPU_CP_BUSY,		UINT64, AVERAGE),
	X("GPU-cb-busy",		GPU_CB_BUSY,		UINT64, AVERAGE),
	X("GPU-sdma-busy",		GPU_SDMA_BUSY,		UINT64, AVERAGE),
	X("GPU-pfp-busy",		GPU_PFP_BUSY,		UINT64, AVERAGE),
	X("GPU-meq-busy",		GPU_MEQ_BUSY,		UINT64, AVERAGE),
	X("GPU-me-busy",		GPU_ME_BUSY,		UINT64, AVERAGE),
	X("GPU-surf-sync-busy",		GPU_SURF_SYNC_BUSY,	UINT64, AVERAGE),
	X("GPU-dma-busy",		GPU_DMA_BUSY,		UINT64, AVERAGE),
	X("GPU-scratch-ram-busy",	GPU_SCRATCH_RAM_BUSY,	UINT64, AVERAGE),
	X("GPU-ce-busy",		GPU_CE_BUSY,		UINT64, AVERAGE),

	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

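/* A note on the counts below (derived from the list above): subtracting 3
 * hides "temperature", "shader-clock" and "memory-clock"; subtracting 10
 * additionally hides the last seven GPU-*-busy counters (pfp through ce);
 * subtracting 25 hides everything from "GPU-load" onwards.
 */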
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3) {
		if (rscreen->chip_class >= VI)
			return ARRAY_SIZE(r600_driver_query_list) - 3;
		else
			return ARRAY_SIZE(r600_driver_query_list) - 10;
	} else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125; /* degrees Celsius */
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
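/* Illustrative usage (a sketch, not code from this file): a state tracker or
 * tool enumerates these queries by first asking for the count and then
 * iterating; "screen" is assumed to be a valid pipe_screen.
 *
 *	struct pipe_driver_query_info info;
 *	int n = screen->get_driver_query_info(screen, 0, NULL);
 *	for (int i = 0; i < n; i++) {
 *		if (screen->get_driver_query_info(screen, i, &info))
 *			printf("%s\n", info.name);
 *	}
 */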

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}
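/* Illustrative usage (a sketch, not code from this file): once the hooks
 * above are installed, a typical occlusion-query round trip through the
 * gallium interface looks like this; "pipe" is assumed to be a valid
 * pipe_context.
 *
 *	union pipe_query_result result;
 *	struct pipe_query *q =
 *		pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *	pipe->begin_query(pipe, q);
 *	... draw something ...
 *	pipe->end_query(pipe, q);
 *	pipe->get_query_result(pipe, q, true, &result);
 *	pipe->destroy_query(pipe, q);
 */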

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}