gallium/radeon: formalize that create_query doesn't need pipe_context
[mesa.git] src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"

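/* Layout parameters of one hardware query result slot, as derived by
 * r600_get_hw_query_params below: byte offsets of the begin/end values and
 * of the fence dword within a slot, plus the stride and count of begin/end
 * pairs for queries that store one pair per render backend. */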
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume: begin/end simply
 * snapshot a counter, and the result is end minus begin. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->begin_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->end_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_CS_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set the top ("result written") bits for unused backends.
		 * Disabled RBs never write results, so mark their slots as
		 * valid with a zero count up front; otherwise consumers that
		 * test the status bit would consider the result forever
		 * unavailable. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

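/* Emit the packets that make the GPU snapshot the query's "begin" values at
 * va: ZPASS_DONE dumps the per-RB occlusion counters, SAMPLE_STREAMOUTSTATS*
 * snapshots the streamout counters of the selected stream, and
 * SAMPLE_PIPELINESTAT snapshots the pipeline-statistics counters.
 * TIME_ELAPSED instead writes a bottom-of-pipe timestamp via an EOP event. */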
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries that need a begin call already reserved CS space for both
	 * packets in begin_query; only NO_START queries reserve it here. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx,
					    unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

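/* Read one 64-bit begin/end pair (given as dword indices into the mapped
 * buffer) and return end - start. If test_status_bit is set, the top bit of
 * each value is a "result written" flag and the difference only counts when
 * both flags are set. */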
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = ctx->screen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to the expected units (nanoseconds):
	 * clock_crystal_freq is in kHz, so ticks * 10^6 / kHz = ns. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
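/* Illustration of how the config bit field above is composed (see
 * r600_query_hw_get_result_resource for the actual logic): an availability
 * query (index < 0) on an occlusion predicate would use config = 4 | 8,
 * while a full 64-bit TIME_ELAPSED readback would use config = 32 | 64,
 * with bits 1/2 added on chained result buffers. */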
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

1657 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1658 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1659 {
1660 struct r600_common_context *ctx =
1661 (struct r600_common_context*)rscreen->aux_context;
1662 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1663 struct r600_resource *buffer;
1664 uint32_t *results;
1665 unsigned i, mask = 0;
1666 unsigned max_rbs = ctx->screen->info.num_render_backends;
1667
1668 assert(rscreen->chip_class <= CAYMAN);
1669
1670 /* if backend_map query is supported by the kernel */
1671 if (rscreen->info.r600_gb_backend_map_valid) {
1672 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1673 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1674 unsigned item_width, item_mask;
1675
1676 if (ctx->chip_class >= EVERGREEN) {
1677 item_width = 4;
1678 item_mask = 0x7;
1679 } else {
1680 item_width = 2;
1681 item_mask = 0x3;
1682 }
1683
1684 while (num_tile_pipes--) {
1685 i = backend_map & item_mask;
1686 mask |= (1<<i);
1687 backend_map >>= item_width;
1688 }
1689 if (mask != 0) {
1690 rscreen->info.enabled_rb_mask = mask;
1691 return;
1692 }
1693 }
1694
1695 /* otherwise backup path for older kernels */
1696
1697 /* create buffer for event data */
1698 buffer = (struct r600_resource*)
1699 pipe_buffer_create(ctx->b.screen, 0,
1700 PIPE_USAGE_STAGING, max_rbs * 16);
1701 if (!buffer)
1702 return;
1703
1704 /* initialize buffer with zeroes */
1705 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1706 if (results) {
1707 memset(results, 0, max_rbs * 4 * 4);
1708
		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask)
		rscreen->info.enabled_rb_mask = mask;
}

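/* Shorthand for pipe_driver_query_info entries: X() assigns no group,
 * XG() one of the R600_QUERY_GROUP_* software groups, and XFULL() an
 * explicit group id. */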
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
	X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

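/* How many of r600_driver_query_list are exposed depends on the kernel:
 * the radeon DRM needs 2.42+ for the last 25 entries (the GPU-load family
 * plus temperature and clocks); on amdgpu (DRM major 3), VI+ hides only the
 * last 3 (temperature and clocks) and older chips hide the last 10. */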
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3) {
		if (rscreen->chip_class >= VI)
			return ARRAY_SIZE(r600_driver_query_list) - 3;
		else
			return ARRAY_SIZE(r600_driver_query_list) - 10;
	} else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

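/* Enumerate driver queries first, then perfcounters. A NULL info pointer
 * requests the total count. Software group ids are offset past the
 * perfcounter groups so that all group ids stay unique. */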
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

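/* Set up the query-related pipe_context entry points. A state tracker then
 * drives them through the usual query lifecycle, roughly:
 *
 *   struct pipe_query *q =
 *      ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *   ctx->begin_query(ctx, q);
 *   ... draw ...
 *   ctx->end_query(ctx, q);
 *   ctx->get_query_result(ctx, q, true, &result);
 *   ctx->destroy_query(ctx, q);
 */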
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

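/* Query-related pipe_screen entry points; unlike the functions above, these
 * operate on the screen alone, without a context. */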
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}