src/gallium/drivers/radeon/r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29 #include "os/os_time.h"
30 #include "tgsi/tgsi_text.h"
31
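/* Byte layout of one hardware query result slot: offsets of the begin/end
 * values and of the fence dword, plus the stride and count of (begin, end)
 * pairs when a single result covers multiple render backends. */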
32 struct r600_hw_query_params {
33 unsigned start_offset;
34 unsigned end_offset;
35 unsigned fence_offset;
36 unsigned pair_stride;
37 unsigned pair_count;
38 };
39
40 /* Queries without buffer handling or suspend/resume. */
41 struct r600_query_sw {
42 struct r600_query b;
43
44 uint64_t begin_result;
45 uint64_t end_result;
46
47 uint64_t begin_time;
48 uint64_t end_time;
49
50 /* Fence for GPU_FINISHED. */
51 struct pipe_fence_handle *fence;
52 };
53
54 static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
55 struct r600_query *rquery)
56 {
57 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
58
59 rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
60 FREE(query);
61 }
62
63 static enum radeon_value_id winsys_id_from_type(unsigned type)
64 {
65 switch (type) {
66 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
67 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
68 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
69 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
70 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
71 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
72 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
73 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
74 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
75 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
76 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
77 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
78 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
79 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
80 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
81 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
82 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
83 case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
84 default: unreachable("query type does not correspond to winsys id");
85 }
86 }
87
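/* Software queries snapshot a CPU/winsys counter in begin() and again in
 * end(); get_result() reports the difference, scaled to a percentage for
 * the *_THREAD_BUSY rate queries. */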
88 static bool r600_query_sw_begin(struct r600_common_context *rctx,
89 struct r600_query *rquery)
90 {
91 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
92 enum radeon_value_id ws_id;
93
94 switch(query->b.type) {
95 case PIPE_QUERY_TIMESTAMP_DISJOINT:
96 case PIPE_QUERY_GPU_FINISHED:
97 break;
98 case R600_QUERY_DRAW_CALLS:
99 query->begin_result = rctx->num_draw_calls;
100 break;
101 case R600_QUERY_PRIM_RESTART_CALLS:
102 query->begin_result = rctx->num_prim_restart_calls;
103 break;
104 case R600_QUERY_SPILL_DRAW_CALLS:
105 query->begin_result = rctx->num_spill_draw_calls;
106 break;
107 case R600_QUERY_COMPUTE_CALLS:
108 query->begin_result = rctx->num_compute_calls;
109 break;
110 case R600_QUERY_SPILL_COMPUTE_CALLS:
111 query->begin_result = rctx->num_spill_compute_calls;
112 break;
113 case R600_QUERY_DMA_CALLS:
114 query->begin_result = rctx->num_dma_calls;
115 break;
116 case R600_QUERY_CP_DMA_CALLS:
117 query->begin_result = rctx->num_cp_dma_calls;
118 break;
119 case R600_QUERY_NUM_VS_FLUSHES:
120 query->begin_result = rctx->num_vs_flushes;
121 break;
122 case R600_QUERY_NUM_PS_FLUSHES:
123 query->begin_result = rctx->num_ps_flushes;
124 break;
125 case R600_QUERY_NUM_CS_FLUSHES:
126 query->begin_result = rctx->num_cs_flushes;
127 break;
128 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
129 query->begin_result = rctx->num_fb_cache_flushes;
130 break;
131 case R600_QUERY_NUM_L2_INVALIDATES:
132 query->begin_result = rctx->num_L2_invalidates;
133 break;
134 case R600_QUERY_NUM_L2_WRITEBACKS:
135 query->begin_result = rctx->num_L2_writebacks;
136 break;
137 case R600_QUERY_TC_OFFLOADED_SLOTS:
138 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
139 break;
140 case R600_QUERY_TC_DIRECT_SLOTS:
141 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
142 break;
143 case R600_QUERY_TC_NUM_SYNCS:
144 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
145 break;
146 case R600_QUERY_REQUESTED_VRAM:
147 case R600_QUERY_REQUESTED_GTT:
148 case R600_QUERY_MAPPED_VRAM:
149 case R600_QUERY_MAPPED_GTT:
150 case R600_QUERY_VRAM_USAGE:
151 case R600_QUERY_VRAM_VIS_USAGE:
152 case R600_QUERY_GTT_USAGE:
153 case R600_QUERY_GPU_TEMPERATURE:
154 case R600_QUERY_CURRENT_GPU_SCLK:
155 case R600_QUERY_CURRENT_GPU_MCLK:
156 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
157 case R600_QUERY_NUM_MAPPED_BUFFERS:
158 query->begin_result = 0;
159 break;
160 case R600_QUERY_BUFFER_WAIT_TIME:
161 case R600_QUERY_NUM_GFX_IBS:
162 case R600_QUERY_NUM_SDMA_IBS:
163 case R600_QUERY_NUM_BYTES_MOVED:
164 case R600_QUERY_NUM_EVICTIONS:
165 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
166 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
167 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
168 break;
169 }
170 case R600_QUERY_CS_THREAD_BUSY:
171 ws_id = winsys_id_from_type(query->b.type);
172 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
173 query->begin_time = os_time_get_nano();
174 break;
175 case R600_QUERY_GALLIUM_THREAD_BUSY:
176 query->begin_result =
177 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
178 query->begin_time = os_time_get_nano();
179 break;
180 case R600_QUERY_GPU_LOAD:
181 case R600_QUERY_GPU_SHADERS_BUSY:
182 case R600_QUERY_GPU_TA_BUSY:
183 case R600_QUERY_GPU_GDS_BUSY:
184 case R600_QUERY_GPU_VGT_BUSY:
185 case R600_QUERY_GPU_IA_BUSY:
186 case R600_QUERY_GPU_SX_BUSY:
187 case R600_QUERY_GPU_WD_BUSY:
188 case R600_QUERY_GPU_BCI_BUSY:
189 case R600_QUERY_GPU_SC_BUSY:
190 case R600_QUERY_GPU_PA_BUSY:
191 case R600_QUERY_GPU_DB_BUSY:
192 case R600_QUERY_GPU_CP_BUSY:
193 case R600_QUERY_GPU_CB_BUSY:
194 case R600_QUERY_GPU_SDMA_BUSY:
195 case R600_QUERY_GPU_PFP_BUSY:
196 case R600_QUERY_GPU_MEQ_BUSY:
197 case R600_QUERY_GPU_ME_BUSY:
198 case R600_QUERY_GPU_SURF_SYNC_BUSY:
199 case R600_QUERY_GPU_DMA_BUSY:
200 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
201 case R600_QUERY_GPU_CE_BUSY:
202 query->begin_result = r600_begin_counter(rctx->screen,
203 query->b.type);
204 break;
205 case R600_QUERY_NUM_COMPILATIONS:
206 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
207 break;
208 case R600_QUERY_NUM_SHADERS_CREATED:
209 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
210 break;
211 case R600_QUERY_NUM_SHADER_CACHE_HITS:
212 query->begin_result =
213 p_atomic_read(&rctx->screen->num_shader_cache_hits);
214 break;
215 case R600_QUERY_GPIN_ASIC_ID:
216 case R600_QUERY_GPIN_NUM_SIMD:
217 case R600_QUERY_GPIN_NUM_RB:
218 case R600_QUERY_GPIN_NUM_SPI:
219 case R600_QUERY_GPIN_NUM_SE:
220 break;
221 default:
222 unreachable("r600_query_sw_begin: bad query type");
223 }
224
225 return true;
226 }
227
228 static bool r600_query_sw_end(struct r600_common_context *rctx,
229 struct r600_query *rquery)
230 {
231 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
232 enum radeon_value_id ws_id;
233
234 switch(query->b.type) {
235 case PIPE_QUERY_TIMESTAMP_DISJOINT:
236 break;
237 case PIPE_QUERY_GPU_FINISHED:
238 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
239 break;
240 case R600_QUERY_DRAW_CALLS:
241 query->end_result = rctx->num_draw_calls;
242 break;
243 case R600_QUERY_PRIM_RESTART_CALLS:
244 query->end_result = rctx->num_prim_restart_calls;
245 break;
246 case R600_QUERY_SPILL_DRAW_CALLS:
247 query->end_result = rctx->num_spill_draw_calls;
248 break;
249 case R600_QUERY_COMPUTE_CALLS:
250 query->end_result = rctx->num_compute_calls;
251 break;
252 case R600_QUERY_SPILL_COMPUTE_CALLS:
253 query->end_result = rctx->num_spill_compute_calls;
254 break;
255 case R600_QUERY_DMA_CALLS:
256 query->end_result = rctx->num_dma_calls;
257 break;
258 case R600_QUERY_CP_DMA_CALLS:
259 query->end_result = rctx->num_cp_dma_calls;
260 break;
261 case R600_QUERY_NUM_VS_FLUSHES:
262 query->end_result = rctx->num_vs_flushes;
263 break;
264 case R600_QUERY_NUM_PS_FLUSHES:
265 query->end_result = rctx->num_ps_flushes;
266 break;
267 case R600_QUERY_NUM_CS_FLUSHES:
268 query->end_result = rctx->num_cs_flushes;
269 break;
270 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
271 query->end_result = rctx->num_fb_cache_flushes;
272 break;
273 case R600_QUERY_NUM_L2_INVALIDATES:
274 query->end_result = rctx->num_L2_invalidates;
275 break;
276 case R600_QUERY_NUM_L2_WRITEBACKS:
277 query->end_result = rctx->num_L2_writebacks;
278 break;
279 case R600_QUERY_TC_OFFLOADED_SLOTS:
280 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
281 break;
282 case R600_QUERY_TC_DIRECT_SLOTS:
283 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
284 break;
285 case R600_QUERY_TC_NUM_SYNCS:
286 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
287 break;
288 case R600_QUERY_REQUESTED_VRAM:
289 case R600_QUERY_REQUESTED_GTT:
290 case R600_QUERY_MAPPED_VRAM:
291 case R600_QUERY_MAPPED_GTT:
292 case R600_QUERY_VRAM_USAGE:
293 case R600_QUERY_VRAM_VIS_USAGE:
294 case R600_QUERY_GTT_USAGE:
295 case R600_QUERY_GPU_TEMPERATURE:
296 case R600_QUERY_CURRENT_GPU_SCLK:
297 case R600_QUERY_CURRENT_GPU_MCLK:
298 case R600_QUERY_BUFFER_WAIT_TIME:
299 case R600_QUERY_NUM_MAPPED_BUFFERS:
300 case R600_QUERY_NUM_GFX_IBS:
301 case R600_QUERY_NUM_SDMA_IBS:
302 case R600_QUERY_NUM_BYTES_MOVED:
303 case R600_QUERY_NUM_EVICTIONS:
304 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
305 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
306 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
307 break;
308 }
309 case R600_QUERY_CS_THREAD_BUSY:
310 ws_id = winsys_id_from_type(query->b.type);
311 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
312 query->end_time = os_time_get_nano();
313 break;
314 case R600_QUERY_GALLIUM_THREAD_BUSY:
315 query->end_result =
316 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
317 query->end_time = os_time_get_nano();
318 break;
319 case R600_QUERY_GPU_LOAD:
320 case R600_QUERY_GPU_SHADERS_BUSY:
321 case R600_QUERY_GPU_TA_BUSY:
322 case R600_QUERY_GPU_GDS_BUSY:
323 case R600_QUERY_GPU_VGT_BUSY:
324 case R600_QUERY_GPU_IA_BUSY:
325 case R600_QUERY_GPU_SX_BUSY:
326 case R600_QUERY_GPU_WD_BUSY:
327 case R600_QUERY_GPU_BCI_BUSY:
328 case R600_QUERY_GPU_SC_BUSY:
329 case R600_QUERY_GPU_PA_BUSY:
330 case R600_QUERY_GPU_DB_BUSY:
331 case R600_QUERY_GPU_CP_BUSY:
332 case R600_QUERY_GPU_CB_BUSY:
333 case R600_QUERY_GPU_SDMA_BUSY:
334 case R600_QUERY_GPU_PFP_BUSY:
335 case R600_QUERY_GPU_MEQ_BUSY:
336 case R600_QUERY_GPU_ME_BUSY:
337 case R600_QUERY_GPU_SURF_SYNC_BUSY:
338 case R600_QUERY_GPU_DMA_BUSY:
339 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
340 case R600_QUERY_GPU_CE_BUSY:
341 query->end_result = r600_end_counter(rctx->screen,
342 query->b.type,
343 query->begin_result);
344 query->begin_result = 0;
345 break;
346 case R600_QUERY_NUM_COMPILATIONS:
347 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
348 break;
349 case R600_QUERY_NUM_SHADERS_CREATED:
350 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
351 break;
352 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
353 query->end_result = rctx->last_tex_ps_draw_ratio;
354 break;
355 case R600_QUERY_NUM_SHADER_CACHE_HITS:
356 query->end_result =
357 p_atomic_read(&rctx->screen->num_shader_cache_hits);
358 break;
359 case R600_QUERY_GPIN_ASIC_ID:
360 case R600_QUERY_GPIN_NUM_SIMD:
361 case R600_QUERY_GPIN_NUM_RB:
362 case R600_QUERY_GPIN_NUM_SPI:
363 case R600_QUERY_GPIN_NUM_SE:
364 break;
365 default:
366 unreachable("r600_query_sw_end: bad query type");
367 }
368
369 return true;
370 }
371
372 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
373 struct r600_query *rquery,
374 bool wait,
375 union pipe_query_result *result)
376 {
377 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
378
379 switch (query->b.type) {
380 case PIPE_QUERY_TIMESTAMP_DISJOINT:
381 /* Convert from cycles per millisecond to cycles per second (Hz). */
382 result->timestamp_disjoint.frequency =
383 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
384 result->timestamp_disjoint.disjoint = false;
385 return true;
386 case PIPE_QUERY_GPU_FINISHED: {
387 struct pipe_screen *screen = rctx->b.screen;
388 struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
389
390 result->b = screen->fence_finish(screen, ctx, query->fence,
391 wait ? PIPE_TIMEOUT_INFINITE : 0);
392 return result->b;
393 }
394
395 case R600_QUERY_CS_THREAD_BUSY:
396 case R600_QUERY_GALLIUM_THREAD_BUSY:
397 result->u64 = (query->end_result - query->begin_result) * 100 /
398 (query->end_time - query->begin_time);
399 return true;
400 case R600_QUERY_GPIN_ASIC_ID:
401 result->u32 = 0;
402 return true;
403 case R600_QUERY_GPIN_NUM_SIMD:
404 result->u32 = rctx->screen->info.num_good_compute_units;
405 return true;
406 case R600_QUERY_GPIN_NUM_RB:
407 result->u32 = rctx->screen->info.num_render_backends;
408 return true;
409 case R600_QUERY_GPIN_NUM_SPI:
410 result->u32 = 1; /* all supported chips have one SPI per SE */
411 return true;
412 case R600_QUERY_GPIN_NUM_SE:
413 result->u32 = rctx->screen->info.max_se;
414 return true;
415 }
416
417 result->u64 = query->end_result - query->begin_result;
418
419 switch (query->b.type) {
420 case R600_QUERY_BUFFER_WAIT_TIME:
421 case R600_QUERY_GPU_TEMPERATURE:
422 result->u64 /= 1000;
423 break;
424 case R600_QUERY_CURRENT_GPU_SCLK:
425 case R600_QUERY_CURRENT_GPU_MCLK:
426 result->u64 *= 1000000;
427 break;
428 }
429
430 return true;
431 }
432
433
434 static struct r600_query_ops sw_query_ops = {
435 .destroy = r600_query_sw_destroy,
436 .begin = r600_query_sw_begin,
437 .end = r600_query_sw_end,
438 .get_result = r600_query_sw_get_result,
439 .get_result_resource = NULL
440 };
441
442 static struct pipe_query *r600_query_sw_create(unsigned query_type)
443 {
444 struct r600_query_sw *query;
445
446 query = CALLOC_STRUCT(r600_query_sw);
447 if (!query)
448 return NULL;
449
450 query->b.type = query_type;
451 query->b.ops = &sw_query_ops;
452
453 return (struct pipe_query *)query;
454 }
455
456 void r600_query_hw_destroy(struct r600_common_screen *rscreen,
457 struct r600_query *rquery)
458 {
459 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
460 struct r600_query_buffer *prev = query->buffer.previous;
461
462 /* Release all query buffers. */
463 while (prev) {
464 struct r600_query_buffer *qbuf = prev;
465 prev = prev->previous;
466 r600_resource_reference(&qbuf->buf, NULL);
467 FREE(qbuf);
468 }
469
470 r600_resource_reference(&query->buffer.buf, NULL);
471 FREE(rquery);
472 }
473
474 static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
475 struct r600_query_hw *query)
476 {
477 unsigned buf_size = MAX2(query->result_size,
478 rscreen->info.min_alloc_size);
479
480 /* Queries are normally read by the CPU after
481 * being written by the GPU, hence staging is probably a good
482 * usage pattern.
483 */
484 struct r600_resource *buf = (struct r600_resource*)
485 pipe_buffer_create(&rscreen->b, 0,
486 PIPE_USAGE_STAGING, buf_size);
487 if (!buf)
488 return NULL;
489
490 if (!query->ops->prepare_buffer(rscreen, query, buf)) {
491 r600_resource_reference(&buf, NULL);
492 return NULL;
493 }
494
495 return buf;
496 }
497
498 static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
499 struct r600_query_hw *query,
500 struct r600_resource *buffer)
501 {
502 /* Callers ensure that the buffer is currently unused by the GPU. */
503 uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
504 PIPE_TRANSFER_WRITE |
505 PIPE_TRANSFER_UNSYNCHRONIZED);
506 if (!results)
507 return false;
508
509 memset(results, 0, buffer->b.b.width0);
510
511 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
512 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
513 unsigned max_rbs = rscreen->info.num_render_backends;
514 unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
515 unsigned num_results;
516 unsigned i, j;
517
518 /* Set top bits for unused backends, so the availability check in r600_query_read_result sees them as written (zero delta). */
519 num_results = buffer->b.b.width0 / query->result_size;
520 for (j = 0; j < num_results; j++) {
521 for (i = 0; i < max_rbs; i++) {
522 if (!(enabled_rb_mask & (1<<i))) {
523 results[(i * 4)+1] = 0x80000000;
524 results[(i * 4)+3] = 0x80000000;
525 }
526 }
527 results += 4 * max_rbs;
528 }
529 }
530
531 return true;
532 }
533
534 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
535 struct r600_query *rquery,
536 bool wait,
537 enum pipe_query_value_type result_type,
538 int index,
539 struct pipe_resource *resource,
540 unsigned offset);
541
542 static struct r600_query_ops query_hw_ops = {
543 .destroy = r600_query_hw_destroy,
544 .begin = r600_query_hw_begin,
545 .end = r600_query_hw_end,
546 .get_result = r600_query_hw_get_result,
547 .get_result_resource = r600_query_hw_get_result_resource,
548 };
549
550 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
551 struct r600_query_hw *query,
552 struct r600_resource *buffer,
553 uint64_t va);
554 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
555 struct r600_query_hw *query,
556 struct r600_resource *buffer,
557 uint64_t va);
558 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
559 struct r600_query_hw *, void *buffer,
560 union pipe_query_result *result);
561 static void r600_query_hw_clear_result(struct r600_query_hw *,
562 union pipe_query_result *);
563
564 static struct r600_query_hw_ops query_hw_default_hw_ops = {
565 .prepare_buffer = r600_query_hw_prepare_buffer,
566 .emit_start = r600_query_hw_do_emit_start,
567 .emit_stop = r600_query_hw_do_emit_stop,
568 .clear_result = r600_query_hw_clear_result,
569 .add_result = r600_query_hw_add_result,
570 };
571
572 bool r600_query_hw_init(struct r600_common_screen *rscreen,
573 struct r600_query_hw *query)
574 {
575 query->buffer.buf = r600_new_query_buffer(rscreen, query);
576 if (!query->buffer.buf)
577 return false;
578
579 return true;
580 }
581
582 static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
583 unsigned query_type,
584 unsigned index)
585 {
586 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
587 if (!query)
588 return NULL;
589
590 query->b.type = query_type;
591 query->b.ops = &query_hw_ops;
592 query->ops = &query_hw_default_hw_ops;
593
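/* result_size is the GPU-written footprint of one begin/end result;
 * num_cs_dw_begin/end reserve command-stream space up front so emitting
 * a query never has to flush in the middle. */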
594 switch (query_type) {
595 case PIPE_QUERY_OCCLUSION_COUNTER:
596 case PIPE_QUERY_OCCLUSION_PREDICATE:
597 query->result_size = 16 * rscreen->info.num_render_backends;
598 query->result_size += 16; /* for the fence + alignment */
599 query->num_cs_dw_begin = 6;
600 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
601 break;
602 case PIPE_QUERY_TIME_ELAPSED:
603 query->result_size = 24;
604 query->num_cs_dw_begin = 8;
605 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
606 break;
607 case PIPE_QUERY_TIMESTAMP:
608 query->result_size = 16;
609 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
610 query->flags = R600_QUERY_HW_FLAG_NO_START;
611 break;
612 case PIPE_QUERY_PRIMITIVES_EMITTED:
613 case PIPE_QUERY_PRIMITIVES_GENERATED:
614 case PIPE_QUERY_SO_STATISTICS:
615 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
616 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
617 query->result_size = 32;
618 query->num_cs_dw_begin = 6;
619 query->num_cs_dw_end = 6;
620 query->stream = index;
621 break;
622 case PIPE_QUERY_PIPELINE_STATISTICS:
623 /* 11 values on EG, 8 on R600. */
624 query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
625 query->result_size += 8; /* for the fence + alignment */
626 query->num_cs_dw_begin = 6;
627 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
628 break;
629 default:
630 assert(0);
631 FREE(query);
632 return NULL;
633 }
634
635 if (!r600_query_hw_init(rscreen, query)) {
636 FREE(query);
637 return NULL;
638 }
639
640 return (struct pipe_query *)query;
641 }
642
643 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
644 unsigned type, int diff)
645 {
646 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
647 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
648 bool old_enable = rctx->num_occlusion_queries != 0;
649 bool old_perfect_enable =
650 rctx->num_perfect_occlusion_queries != 0;
651 bool enable, perfect_enable;
652
653 rctx->num_occlusion_queries += diff;
654 assert(rctx->num_occlusion_queries >= 0);
655
656 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
657 rctx->num_perfect_occlusion_queries += diff;
658 assert(rctx->num_perfect_occlusion_queries >= 0);
659 }
660
661 enable = rctx->num_occlusion_queries != 0;
662 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
663
664 if (enable != old_enable || perfect_enable != old_perfect_enable) {
665 rctx->set_occlusion_query_state(&rctx->b, enable);
666 }
667 }
668 }
669
670 static unsigned event_type_for_stream(struct r600_query_hw *query)
671 {
672 switch (query->stream) {
673 default:
674 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
675 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
676 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
677 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
678 }
679 }
680
681 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
682 struct r600_query_hw *query,
683 struct r600_resource *buffer,
684 uint64_t va)
685 {
686 struct radeon_winsys_cs *cs = ctx->gfx.cs;
687
688 switch (query->b.type) {
689 case PIPE_QUERY_OCCLUSION_COUNTER:
690 case PIPE_QUERY_OCCLUSION_PREDICATE:
691 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
692 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
693 radeon_emit(cs, va);
694 radeon_emit(cs, va >> 32);
695 break;
696 case PIPE_QUERY_PRIMITIVES_EMITTED:
697 case PIPE_QUERY_PRIMITIVES_GENERATED:
698 case PIPE_QUERY_SO_STATISTICS:
699 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
700 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
701 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
702 radeon_emit(cs, va);
703 radeon_emit(cs, va >> 32);
704 break;
705 case PIPE_QUERY_TIME_ELAPSED:
706 if (ctx->chip_class >= SI) {
707 /* Write the timestamp from the CP without waiting for
708 * outstanding draws (top-of-pipe).
709 */
710 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
711 radeon_emit(cs, COPY_DATA_COUNT_SEL |
712 COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
713 COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
714 radeon_emit(cs, 0);
715 radeon_emit(cs, 0);
716 radeon_emit(cs, va);
717 radeon_emit(cs, va >> 32);
718 } else {
719 /* Write the timestamp after the last draw is done.
720 * (bottom-of-pipe)
721 */
722 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
723 0, 3, NULL, va, 0, 0);
724 }
725 break;
726 case PIPE_QUERY_PIPELINE_STATISTICS:
727 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
728 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
729 radeon_emit(cs, va);
730 radeon_emit(cs, va >> 32);
731 break;
732 default:
733 assert(0);
734 }
735 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
736 RADEON_PRIO_QUERY);
737 }
738
739 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
740 struct r600_query_hw *query)
741 {
742 uint64_t va;
743
744 if (!query->buffer.buf)
745 return; // previous buffer allocation failure
746
747 r600_update_occlusion_query_state(ctx, query->b.type, 1);
748 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
749
750 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
751 true);
752
753 /* Get a new query buffer if needed. */
754 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
755 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
756 *qbuf = query->buffer;
757 query->buffer.results_end = 0;
758 query->buffer.previous = qbuf;
759 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
760 if (!query->buffer.buf)
761 return;
762 }
763
764 /* emit begin query */
765 va = query->buffer.buf->gpu_address + query->buffer.results_end;
766
767 query->ops->emit_start(ctx, query, query->buffer.buf, va);
768
769 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
770 }
771
772 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
773 struct r600_query_hw *query,
774 struct r600_resource *buffer,
775 uint64_t va)
776 {
777 struct radeon_winsys_cs *cs = ctx->gfx.cs;
778 uint64_t fence_va = 0;
779
780 switch (query->b.type) {
781 case PIPE_QUERY_OCCLUSION_COUNTER:
782 case PIPE_QUERY_OCCLUSION_PREDICATE:
783 va += 8;
784 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
785 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
786 radeon_emit(cs, va);
787 radeon_emit(cs, va >> 32);
788
789 fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
790 break;
791 case PIPE_QUERY_PRIMITIVES_EMITTED:
792 case PIPE_QUERY_PRIMITIVES_GENERATED:
793 case PIPE_QUERY_SO_STATISTICS:
794 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
795 va += query->result_size/2;
796 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
797 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
798 radeon_emit(cs, va);
799 radeon_emit(cs, va >> 32);
800 break;
801 case PIPE_QUERY_TIME_ELAPSED:
802 va += 8;
803 /* fall through */
804 case PIPE_QUERY_TIMESTAMP:
805 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
806 0, 3, NULL, va, 0, 0);
807 fence_va = va + 8;
808 break;
809 case PIPE_QUERY_PIPELINE_STATISTICS: {
810 unsigned sample_size = (query->result_size - 8) / 2;
811
812 va += sample_size;
813 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
814 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
815 radeon_emit(cs, va);
816 radeon_emit(cs, va >> 32);
817
818 fence_va = va + sample_size;
819 break;
820 }
821 default:
822 assert(0);
823 }
824 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
825 RADEON_PRIO_QUERY);
826
827 if (fence_va)
828 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
829 query->buffer.buf, fence_va, 0, 0x80000000);
830 }
831
832 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
833 struct r600_query_hw *query)
834 {
835 uint64_t va;
836
837 if (!query->buffer.buf)
838 return; // previous buffer allocation failure
839
840 /* Queries that emit a begin already reserved CS space for the end in begin_query. */
841 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
842 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
843 }
844
845 /* emit end query */
846 va = query->buffer.buf->gpu_address + query->buffer.results_end;
847
848 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
849
850 query->buffer.results_end += query->result_size;
851
852 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
853 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
854
855 r600_update_occlusion_query_state(ctx, query->b.type, -1);
856 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
857 }
858
859 static void r600_emit_query_predication(struct r600_common_context *ctx,
860 struct r600_atom *atom)
861 {
862 struct radeon_winsys_cs *cs = ctx->gfx.cs;
863 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
864 struct r600_query_buffer *qbuf;
865 uint32_t op;
866 bool flag_wait;
867
868 if (!query)
869 return;
870
871 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
872 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
873
874 switch (query->b.type) {
875 case PIPE_QUERY_OCCLUSION_COUNTER:
876 case PIPE_QUERY_OCCLUSION_PREDICATE:
877 op = PRED_OP(PREDICATION_OP_ZPASS);
878 break;
879 case PIPE_QUERY_PRIMITIVES_EMITTED:
880 case PIPE_QUERY_PRIMITIVES_GENERATED:
881 case PIPE_QUERY_SO_STATISTICS:
882 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
883 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
884 break;
885 default:
886 assert(0);
887 return;
888 }
889
890 /* if true then invert, see GL_ARB_conditional_render_inverted */
891 if (ctx->render_cond_invert)
892 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
893 else
894 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
895
896 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
897
898 /* emit predicate packets for all data blocks */
899 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
900 unsigned results_base = 0;
901 uint64_t va_base = qbuf->buf->gpu_address;
902
903 while (results_base < qbuf->results_end) {
904 uint64_t va = va_base + results_base;
905
906 if (ctx->chip_class >= GFX9) {
907 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
908 radeon_emit(cs, op);
909 radeon_emit(cs, va);
910 radeon_emit(cs, va >> 32);
911 } else {
912 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
913 radeon_emit(cs, va);
914 radeon_emit(cs, op | ((va >> 32) & 0xFF));
915 }
916 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
917 RADEON_PRIO_QUERY);
918 results_base += query->result_size;
919
920 /* set CONTINUE bit for all packets except the first */
921 op |= PREDICATION_CONTINUE;
922 }
923 }
924 }
925
926 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
927 {
928 struct r600_common_screen *rscreen =
929 (struct r600_common_screen *)ctx->screen;
930
931 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
932 query_type == PIPE_QUERY_GPU_FINISHED ||
933 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
934 return r600_query_sw_create(query_type);
935
936 return r600_query_hw_create(rscreen, query_type, index);
937 }
938
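/* A minimal sketch of how a state tracker drives these hooks (illustration
 * only, not code from this file):
 *
 *    struct pipe_query *q = ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *    ctx->begin_query(ctx, q);
 *    ... draw calls ...
 *    ctx->end_query(ctx, q);
 *    union pipe_query_result res;
 *    ctx->get_query_result(ctx, q, true, &res);
 *    ctx->destroy_query(ctx, q);
 */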
939 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
940 {
941 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
942 struct r600_query *rquery = (struct r600_query *)query;
943
944 rquery->ops->destroy(rctx->screen, rquery);
945 }
946
947 static boolean r600_begin_query(struct pipe_context *ctx,
948 struct pipe_query *query)
949 {
950 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
951 struct r600_query *rquery = (struct r600_query *)query;
952
953 return rquery->ops->begin(rctx, rquery);
954 }
955
956 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
957 struct r600_query_hw *query)
958 {
959 struct r600_query_buffer *prev = query->buffer.previous;
960
961 /* Discard the old query buffers. */
962 while (prev) {
963 struct r600_query_buffer *qbuf = prev;
964 prev = prev->previous;
965 r600_resource_reference(&qbuf->buf, NULL);
966 FREE(qbuf);
967 }
968
969 query->buffer.results_end = 0;
970 query->buffer.previous = NULL;
971
972 /* Obtain a new buffer if the current one can't be mapped without a stall. */
973 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
974 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
975 r600_resource_reference(&query->buffer.buf, NULL);
976 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
977 } else {
978 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
979 r600_resource_reference(&query->buffer.buf, NULL);
980 }
981 }
982
983 bool r600_query_hw_begin(struct r600_common_context *rctx,
984 struct r600_query *rquery)
985 {
986 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
987
988 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
989 assert(0);
990 return false;
991 }
992
993 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
994 r600_query_hw_reset_buffers(rctx, query);
995
996 r600_query_hw_emit_start(rctx, query);
997 if (!query->buffer.buf)
998 return false;
999
1000 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1001 return true;
1002 }
1003
1004 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1005 {
1006 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1007 struct r600_query *rquery = (struct r600_query *)query;
1008
1009 return rquery->ops->end(rctx, rquery);
1010 }
1011
1012 bool r600_query_hw_end(struct r600_common_context *rctx,
1013 struct r600_query *rquery)
1014 {
1015 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1016
1017 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1018 r600_query_hw_reset_buffers(rctx, query);
1019
1020 r600_query_hw_emit_stop(rctx, query);
1021
1022 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1023 LIST_DELINIT(&query->list);
1024
1025 if (!query->buffer.buf)
1026 return false;
1027
1028 return true;
1029 }
1030
1031 static void r600_get_hw_query_params(struct r600_common_context *rctx,
1032 struct r600_query_hw *rquery, int index,
1033 struct r600_hw_query_params *params)
1034 {
1035 unsigned max_rbs = rctx->screen->info.num_render_backends;
1036
1037 params->pair_stride = 0;
1038 params->pair_count = 1;
1039
1040 switch (rquery->b.type) {
1041 case PIPE_QUERY_OCCLUSION_COUNTER:
1042 case PIPE_QUERY_OCCLUSION_PREDICATE:
1043 params->start_offset = 0;
1044 params->end_offset = 8;
1045 params->fence_offset = max_rbs * 16;
1046 params->pair_stride = 16;
1047 params->pair_count = max_rbs;
1048 break;
1049 case PIPE_QUERY_TIME_ELAPSED:
1050 params->start_offset = 0;
1051 params->end_offset = 8;
1052 params->fence_offset = 16;
1053 break;
1054 case PIPE_QUERY_TIMESTAMP:
1055 params->start_offset = 0;
1056 params->end_offset = 0;
1057 params->fence_offset = 8;
1058 break;
1059 case PIPE_QUERY_PRIMITIVES_EMITTED:
1060 params->start_offset = 8;
1061 params->end_offset = 24;
1062 params->fence_offset = params->end_offset + 4;
1063 break;
1064 case PIPE_QUERY_PRIMITIVES_GENERATED:
1065 params->start_offset = 0;
1066 params->end_offset = 16;
1067 params->fence_offset = params->end_offset + 4;
1068 break;
1069 case PIPE_QUERY_SO_STATISTICS:
1070 params->start_offset = 8 - index * 8;
1071 params->end_offset = 24 - index * 8;
1072 params->fence_offset = params->end_offset + 4;
1073 break;
1074 case PIPE_QUERY_PIPELINE_STATISTICS:
1075 {
1076 /* Offsets apply to EG+ */
1077 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1078 params->start_offset = offsets[index];
1079 params->end_offset = 88 + offsets[index];
1080 params->fence_offset = 2 * 88;
1081 break;
1082 }
1083 default:
1084 unreachable("r600_get_hw_query_params unsupported");
1085 }
1086 }
1087
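/* Read a (start, end) pair of 64-bit counters from a mapped result buffer
 * and return end - start. With test_status_bit, bit 63 (the "value written"
 * marker) must be set in both values, else 0 is returned because the GPU
 * has not finished writing the result. */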
1088 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
1089 bool test_status_bit)
1090 {
1091 uint32_t *current_result = (uint32_t*)map;
1092 uint64_t start, end;
1093
1094 start = (uint64_t)current_result[start_index] |
1095 (uint64_t)current_result[start_index+1] << 32;
1096 end = (uint64_t)current_result[end_index] |
1097 (uint64_t)current_result[end_index+1] << 32;
1098
1099 if (!test_status_bit ||
1100 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1101 return end - start;
1102 }
1103 return 0;
1104 }
1105
1106 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
1107 struct r600_query_hw *query,
1108 void *buffer,
1109 union pipe_query_result *result)
1110 {
1111 unsigned max_rbs = rscreen->info.num_render_backends;
1112
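/* Each result below is a set of 64-bit (begin, end) counter pairs written
 * by the EVENT_WRITE packets above; e.g. one occlusion result is max_rbs
 * 16-byte { u64 begin; u64 end; } ZPASS pairs. */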
1113 switch (query->b.type) {
1114 case PIPE_QUERY_OCCLUSION_COUNTER: {
1115 for (unsigned i = 0; i < max_rbs; ++i) {
1116 unsigned results_base = i * 16;
1117 result->u64 +=
1118 r600_query_read_result(buffer + results_base, 0, 2, true);
1119 }
1120 break;
1121 }
1122 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1123 for (unsigned i = 0; i < max_rbs; ++i) {
1124 unsigned results_base = i * 16;
1125 result->b = result->b ||
1126 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1127 }
1128 break;
1129 }
1130 case PIPE_QUERY_TIME_ELAPSED:
1131 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1132 break;
1133 case PIPE_QUERY_TIMESTAMP:
1134 result->u64 = *(uint64_t*)buffer;
1135 break;
1136 case PIPE_QUERY_PRIMITIVES_EMITTED:
1137 /* SAMPLE_STREAMOUTSTATS stores this structure:
1138 * {
1139 * u64 NumPrimitivesWritten;
1140 * u64 PrimitiveStorageNeeded;
1141 * }
1142 * We only need NumPrimitivesWritten here. */
1143 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1144 break;
1145 case PIPE_QUERY_PRIMITIVES_GENERATED:
1146 /* Here we read PrimitiveStorageNeeded. */
1147 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1148 break;
1149 case PIPE_QUERY_SO_STATISTICS:
1150 result->so_statistics.num_primitives_written +=
1151 r600_query_read_result(buffer, 2, 6, true);
1152 result->so_statistics.primitives_storage_needed +=
1153 r600_query_read_result(buffer, 0, 4, true);
1154 break;
1155 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1156 result->b = result->b ||
1157 r600_query_read_result(buffer, 2, 6, true) !=
1158 r600_query_read_result(buffer, 0, 4, true);
1159 break;
1160 case PIPE_QUERY_PIPELINE_STATISTICS:
1161 if (rscreen->chip_class >= EVERGREEN) {
1162 result->pipeline_statistics.ps_invocations +=
1163 r600_query_read_result(buffer, 0, 22, false);
1164 result->pipeline_statistics.c_primitives +=
1165 r600_query_read_result(buffer, 2, 24, false);
1166 result->pipeline_statistics.c_invocations +=
1167 r600_query_read_result(buffer, 4, 26, false);
1168 result->pipeline_statistics.vs_invocations +=
1169 r600_query_read_result(buffer, 6, 28, false);
1170 result->pipeline_statistics.gs_invocations +=
1171 r600_query_read_result(buffer, 8, 30, false);
1172 result->pipeline_statistics.gs_primitives +=
1173 r600_query_read_result(buffer, 10, 32, false);
1174 result->pipeline_statistics.ia_primitives +=
1175 r600_query_read_result(buffer, 12, 34, false);
1176 result->pipeline_statistics.ia_vertices +=
1177 r600_query_read_result(buffer, 14, 36, false);
1178 result->pipeline_statistics.hs_invocations +=
1179 r600_query_read_result(buffer, 16, 38, false);
1180 result->pipeline_statistics.ds_invocations +=
1181 r600_query_read_result(buffer, 18, 40, false);
1182 result->pipeline_statistics.cs_invocations +=
1183 r600_query_read_result(buffer, 20, 42, false);
1184 } else {
1185 result->pipeline_statistics.ps_invocations +=
1186 r600_query_read_result(buffer, 0, 16, false);
1187 result->pipeline_statistics.c_primitives +=
1188 r600_query_read_result(buffer, 2, 18, false);
1189 result->pipeline_statistics.c_invocations +=
1190 r600_query_read_result(buffer, 4, 20, false);
1191 result->pipeline_statistics.vs_invocations +=
1192 r600_query_read_result(buffer, 6, 22, false);
1193 result->pipeline_statistics.gs_invocations +=
1194 r600_query_read_result(buffer, 8, 24, false);
1195 result->pipeline_statistics.gs_primitives +=
1196 r600_query_read_result(buffer, 10, 26, false);
1197 result->pipeline_statistics.ia_primitives +=
1198 r600_query_read_result(buffer, 12, 28, false);
1199 result->pipeline_statistics.ia_vertices +=
1200 r600_query_read_result(buffer, 14, 30, false);
1201 }
1202 #if 0 /* for testing */
1203 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1204 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1205 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1206 result->pipeline_statistics.ia_vertices,
1207 result->pipeline_statistics.ia_primitives,
1208 result->pipeline_statistics.vs_invocations,
1209 result->pipeline_statistics.hs_invocations,
1210 result->pipeline_statistics.ds_invocations,
1211 result->pipeline_statistics.gs_invocations,
1212 result->pipeline_statistics.gs_primitives,
1213 result->pipeline_statistics.c_invocations,
1214 result->pipeline_statistics.c_primitives,
1215 result->pipeline_statistics.ps_invocations,
1216 result->pipeline_statistics.cs_invocations);
1217 #endif
1218 break;
1219 default:
1220 assert(0);
1221 }
1222 }
1223
1224 static boolean r600_get_query_result(struct pipe_context *ctx,
1225 struct pipe_query *query, boolean wait,
1226 union pipe_query_result *result)
1227 {
1228 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1229 struct r600_query *rquery = (struct r600_query *)query;
1230
1231 return rquery->ops->get_result(rctx, rquery, wait, result);
1232 }
1233
1234 static void r600_get_query_result_resource(struct pipe_context *ctx,
1235 struct pipe_query *query,
1236 boolean wait,
1237 enum pipe_query_value_type result_type,
1238 int index,
1239 struct pipe_resource *resource,
1240 unsigned offset)
1241 {
1242 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1243 struct r600_query *rquery = (struct r600_query *)query;
1244
1245 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1246 resource, offset);
1247 }
1248
1249 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1250 union pipe_query_result *result)
1251 {
1252 util_query_clear_result(result, query->b.type);
1253 }
1254
1255 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1256 struct r600_query *rquery,
1257 bool wait, union pipe_query_result *result)
1258 {
1259 struct r600_common_screen *rscreen = rctx->screen;
1260 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1261 struct r600_query_buffer *qbuf;
1262
1263 query->ops->clear_result(query, result);
1264
1265 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1266 unsigned usage = PIPE_TRANSFER_READ |
1267 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1268 unsigned results_base = 0;
1269 void *map;
1270
1271 if (rquery->b.flushed)
1272 map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1273 else
1274 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);
1275
1276 if (!map)
1277 return false;
1278
1279 while (results_base != qbuf->results_end) {
1280 query->ops->add_result(rscreen, query, map + results_base,
1281 result);
1282 results_base += query->result_size;
1283 }
1284 }
1285
1286 /* Convert the time to expected units. */
1287 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1288 rquery->type == PIPE_QUERY_TIMESTAMP) {
1289 result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
1290 }
1291 return true;
1292 }
1293
1294 /* Create the compute shader that is used to collect the results.
1295 *
1296 * One compute grid with a single thread is launched for every query result
1297 * buffer. The thread (optionally) reads a previous summary buffer, then
1298 * accumulates data from the query result buffer, and writes the result either
1299 * to a summary buffer to be consumed by the next grid invocation or to the
1300 * user-supplied buffer.
1301 *
1302 * Data layout:
1303 *
1304 * CONST
1305 * 0.x = end_offset
1306 * 0.y = result_stride
1307 * 0.z = result_count
1308 * 0.w = bit field:
1309 * 1: read previously accumulated values
1310 * 2: write accumulated values for chaining
1311 * 4: write result available
1312 * 8: convert result to boolean (0/1)
1313 * 16: only read one dword and use that as result
1314 * 32: apply timestamp conversion
1315 * 64: store full 64 bits result
1316 * 128: store signed 32 bits result
1317 * 1.x = fence_offset
1318 * 1.y = pair_stride
1319 * 1.z = pair_count
1320 *
1321 * BUFFER[0] = query result buffer
1322 * BUFFER[1] = previous summary buffer
1323 * BUFFER[2] = next summary buffer or user-supplied buffer
1324 */
1325 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1326 {
1327 /* TEMP[0].xy = accumulated result so far
1328 * TEMP[0].z = result not available
1329 *
1330 * TEMP[1].x = current result index
1331 * TEMP[1].y = current pair index
1332 */
1333 static const char text_tmpl[] =
1334 "COMP\n"
1335 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1336 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1337 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1338 "DCL BUFFER[0]\n"
1339 "DCL BUFFER[1]\n"
1340 "DCL BUFFER[2]\n"
1341 "DCL CONST[0..1]\n"
1342 "DCL TEMP[0..5]\n"
1343 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1344 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1345 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1346 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1347
1348 "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
1349 "UIF TEMP[5]\n"
1350 /* Check result availability. */
1351 "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
1352 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1353 "MOV TEMP[1], TEMP[0].zzzz\n"
1354 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1355
1356 /* Load result if available. */
1357 "UIF TEMP[1]\n"
1358 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1359 "ENDIF\n"
1360 "ELSE\n"
1361 /* Load previously accumulated result if requested. */
1362 "MOV TEMP[0], IMM[0].xxxx\n"
1363 "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
1364 "UIF TEMP[4]\n"
1365 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1366 "ENDIF\n"
1367
1368 "MOV TEMP[1].x, IMM[0].xxxx\n"
1369 "BGNLOOP\n"
1370 /* Break if accumulated result so far is not available. */
1371 "UIF TEMP[0].zzzz\n"
1372 "BRK\n"
1373 "ENDIF\n"
1374
1375 /* Break if result_index >= result_count. */
1376 "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
1377 "UIF TEMP[5]\n"
1378 "BRK\n"
1379 "ENDIF\n"
1380
1381 /* Load fence and check result availability */
1382 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
1383 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1384 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1385 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1386 "UIF TEMP[0].zzzz\n"
1387 "BRK\n"
1388 "ENDIF\n"
1389
1390 "MOV TEMP[1].y, IMM[0].xxxx\n"
1391 "BGNLOOP\n"
1392 /* Load start and end. */
1393 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
1394 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
1395 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1396
1397 "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
1398 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"
1399
1400 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1401 "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"
1402
1403 /* Increment pair index */
1404 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1405 "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
1406 "UIF TEMP[5]\n"
1407 "BRK\n"
1408 "ENDIF\n"
1409 "ENDLOOP\n"
1410
1411 /* Increment result index */
1412 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1413 "ENDLOOP\n"
1414 "ENDIF\n"
1415
1416 "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
1417 "UIF TEMP[4]\n"
1418 /* Store accumulated data for chaining. */
1419 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1420 "ELSE\n"
1421 "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
1422 "UIF TEMP[4]\n"
1423 /* Store result availability. */
1424 "NOT TEMP[0].z, TEMP[0]\n"
1425 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1426 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1427
1428 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1429 "UIF TEMP[4]\n"
1430 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1431 "ENDIF\n"
1432 "ELSE\n"
1433 /* Store result if it is available. */
1434 "NOT TEMP[4], TEMP[0].zzzz\n"
1435 "UIF TEMP[4]\n"
1436 /* Apply timestamp conversion */
1437 "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
1438 "UIF TEMP[4]\n"
1439 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1440 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1441 "ENDIF\n"
1442
1443 /* Convert to boolean */
1444 "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
1445 "UIF TEMP[4]\n"
1446 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
1447 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1448 "MOV TEMP[0].y, IMM[0].xxxx\n"
1449 "ENDIF\n"
1450
1451 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1452 "UIF TEMP[4]\n"
1453 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1454 "ELSE\n"
1455 /* Clamping */
1456 "UIF TEMP[0].yyyy\n"
1457 "MOV TEMP[0].x, IMM[0].wwww\n"
1458 "ENDIF\n"
1459
1460 "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
1461 "UIF TEMP[4]\n"
1462 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1463 "ENDIF\n"
1464
1465 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1466 "ENDIF\n"
1467 "ENDIF\n"
1468 "ENDIF\n"
1469 "ENDIF\n"
1470
1471 "END\n";
1472
1473 char text[sizeof(text_tmpl) + 32];
1474 struct tgsi_token tokens[1024];
1475 struct pipe_compute_state state = {};
1476
1477 /* Hard code the frequency into the shader so that the backend can
1478 * use the full range of optimizations for divide-by-constant.
1479 */
1480 snprintf(text, sizeof(text), text_tmpl,
1481 rctx->screen->info.clock_crystal_freq);
1482
1483 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1484 assert(false);
1485 return;
1486 }
1487
1488 state.ir_type = PIPE_SHADER_IR_TGSI;
1489 state.prog = tokens;
1490
1491 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1492 }
1493
1494 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1495 struct r600_qbo_state *st)
1496 {
1497 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1498
1499 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1500 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1501
1502 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1503 for (unsigned i = 0; i < 3; ++i)
1504 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1505 }
1506
1507 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1508 struct r600_query *rquery,
1509 bool wait,
1510 enum pipe_query_value_type result_type,
1511 int index,
1512 struct pipe_resource *resource,
1513 unsigned offset)
1514 {
1515 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1516 struct r600_query_buffer *qbuf;
1517 struct r600_query_buffer *qbuf_prev;
1518 struct pipe_resource *tmp_buffer = NULL;
1519 unsigned tmp_buffer_offset = 0;
1520 struct r600_qbo_state saved_state = {};
1521 struct pipe_grid_info grid = {};
1522 struct pipe_constant_buffer constant_buffer = {};
1523 struct pipe_shader_buffer ssbo[3];
1524 struct r600_hw_query_params params;
1525 struct {
1526 uint32_t end_offset;
1527 uint32_t result_stride;
1528 uint32_t result_count;
1529 uint32_t config;
1530 uint32_t fence_offset;
1531 uint32_t pair_stride;
1532 uint32_t pair_count;
1533 } consts;
1534
1535 if (!rctx->query_result_shader) {
1536 r600_create_query_result_shader(rctx);
1537 if (!rctx->query_result_shader)
1538 return;
1539 }
1540
1541 if (query->buffer.previous) {
1542 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1543 &tmp_buffer_offset, &tmp_buffer);
1544 if (!tmp_buffer)
1545 return;
1546 }
1547
1548 rctx->save_qbo_state(&rctx->b, &saved_state);
1549
1550 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1551 consts.end_offset = params.end_offset - params.start_offset;
1552 consts.fence_offset = params.fence_offset - params.start_offset;
1553 consts.result_stride = query->result_size;
1554 consts.pair_stride = params.pair_stride;
1555 consts.pair_count = params.pair_count;
1556
1557 constant_buffer.buffer_size = sizeof(consts);
1558 constant_buffer.user_buffer = &consts;
1559
1560 ssbo[1].buffer = tmp_buffer;
1561 ssbo[1].buffer_offset = tmp_buffer_offset;
1562 ssbo[1].buffer_size = 16;
1563
1564 ssbo[2] = ssbo[1];
1565
1566 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1567
1568 grid.block[0] = 1;
1569 grid.block[1] = 1;
1570 grid.block[2] = 1;
1571 grid.grid[0] = 1;
1572 grid.grid[1] = 1;
1573 grid.grid[2] = 1;
1574
1575 consts.config = 0;
1576 if (index < 0)
1577 consts.config |= 4;
1578 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1579 query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
1580 consts.config |= 8;
1581 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1582 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1583 consts.config |= 32;
1584
1585 switch (result_type) {
1586 case PIPE_QUERY_TYPE_U64:
1587 case PIPE_QUERY_TYPE_I64:
1588 consts.config |= 64;
1589 break;
1590 case PIPE_QUERY_TYPE_I32:
1591 consts.config |= 128;
1592 break;
1593 case PIPE_QUERY_TYPE_U32:
1594 break;
1595 }
1596
1597 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1598
1599 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1600 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1601 qbuf_prev = qbuf->previous;
1602 consts.result_count = qbuf->results_end / query->result_size;
1603 consts.config &= ~3;
1604 if (qbuf != &query->buffer)
1605 consts.config |= 1;
1606 if (qbuf->previous)
1607 consts.config |= 2;
1608 } else {
1609 /* Only read the last timestamp. */
1610 qbuf_prev = NULL;
1611 consts.result_count = 0;
1612 consts.config |= 16;
1613 params.start_offset += qbuf->results_end - query->result_size;
1614 }
1615
1616 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1617
1618 ssbo[0].buffer = &qbuf->buf->b.b;
1619 ssbo[0].buffer_offset = params.start_offset;
1620 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1621
1622 if (!qbuf->previous) {
1623 ssbo[2].buffer = resource;
1624 ssbo[2].buffer_offset = offset;
1625 ssbo[2].buffer_size = 8;
1626
1627 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1628 }
1629
1630 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1631
1632 if (wait && qbuf == &query->buffer) {
1633 uint64_t va;
1634
1635 /* Wait for result availability. Wait only for readiness
1636 * of the last entry, since the fence writes should be
1637 * serialized in the CP.
1638 */
1639 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1640 va += params.fence_offset;
1641
1642 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1643 }
1644
1645 rctx->b.launch_grid(&rctx->b, &grid);
1646 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1647 }
1648
1649 r600_restore_qbo_state(rctx, &saved_state);
1650 pipe_resource_reference(&tmp_buffer, NULL);
1651 }
1652
1653 static void r600_render_condition(struct pipe_context *ctx,
1654 struct pipe_query *query,
1655 boolean condition,
1656 enum pipe_render_cond_flag mode)
1657 {
1658 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1659 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1660 struct r600_query_buffer *qbuf;
1661 struct r600_atom *atom = &rctx->render_cond_atom;
1662
1663 rctx->render_cond = query;
1664 rctx->render_cond_invert = condition;
1665 rctx->render_cond_mode = mode;
1666
1667 /* Compute the size of SET_PREDICATION packets. */
1668 atom->num_dw = 0;
1669 if (query) {
1670 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1671 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1672 }
1673
1674 rctx->set_atom_dirty(rctx, atom, query != NULL);
1675 }
1676
1677 void r600_suspend_queries(struct r600_common_context *ctx)
1678 {
1679 struct r600_query_hw *query;
1680
1681 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1682 r600_query_hw_emit_stop(ctx, query);
1683 }
1684 assert(ctx->num_cs_dw_queries_suspend == 0);
1685 }
1686
1687 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1688 struct list_head *query_list)
1689 {
1690 struct r600_query_hw *query;
1691 unsigned num_dw = 0;
1692
1693 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1694 /* begin + end */
1695 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1696
1697 /* Workaround for the fact that
1698 * num_cs_dw_nontimer_queries_suspend is incremented for every
1699 * resumed query, which raises the bar in need_cs_space for
1700 * queries about to be resumed.
1701 */
1702 num_dw += query->num_cs_dw_end;
1703 }
1704 /* primitives generated query */
1705 num_dw += ctx->streamout.enable_atom.num_dw;
1706 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1707 num_dw += 13;
1708
1709 return num_dw;
1710 }
1711
void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/* Use the backend_map query if the kernel supports it. */
	if (rscreen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}
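
		/* backend_map packs one render-backend id per tile pipe,
		 * item_width bits apart; item_mask extracts a single id.
		 */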
		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* Otherwise, use the fallback path for older kernels. */

	/* Create a buffer for the event data. */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

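	/* ZPASS_DONE makes each render backend write a 16-byte slot (two
	 * 64-bit counters) into the buffer, hence max_rbs * 16 bytes.
	 */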
	/* Initialize the buffer with zeroes. */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* Emit EVENT_WRITE for ZPASS_DONE. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

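		/* Mapping the buffer for read syncs with the GFX ring, so
		 * the ZPASS_DONE data has landed before we inspect it.
		 */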
		/* Analyze the results. */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* An active backend sets at least the top
				 * bit of this dword. */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask)
		rscreen->info.enabled_rb_mask = mask;
}

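/* Helpers for building r600_driver_query_list[]: XFULL spells out every
 * field, X leaves the query ungrouped (group id ~0), and XG assigns it to
 * one of the R600_QUERY_GROUP_* groups.
 */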
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
	X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

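/* Trim the list according to what the kernel can report: radeon (DRM 2.x)
 * before 2.42 lacks the last 25 entries (temperature, clocks, GPU-load and
 * the block-busy counters), and amdgpu (DRM 3.x) before VI lacks the last
 * 7 block-busy counters. This is why those queries must stay at the tail
 * of r600_driver_query_list.
 */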
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3) {
		if (rscreen->chip_class >= VI)
			return ARRAY_SIZE(r600_driver_query_list);
		else
			return ARRAY_SIZE(r600_driver_query_list) - 7;
	}
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

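/* Implementation of pipe_screen::get_driver_query_info. Called with
 * info == NULL, it returns the total number of queries (driver queries
 * plus perf counters); otherwise it fills *info for the given index,
 * forwarding out-of-range indices to the perf-counter code. These are
 * the queries that e.g. GALLIUM_HUD exposes by name.
 */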
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
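	/* The temperature is reported in degrees Celsius; 125 is used as a
	 * plausible ceiling for the GPU junction temperature.
	 */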
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

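	/* The only software query group exposed here is GPIN with its five
	 * queries (the XG(GPIN, ...) entries above).
	 */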
	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

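/* Hook the query entry points up on a newly created context. */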
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

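	/* Chips that report no render backends cannot produce occlusion
	 * results, so leave render_condition unset there.
	 */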
	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}