radeonsi: add a HUD query for getting an average GFX BO list size
[mesa.git] src/gallium/drivers/radeon/r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29 #include "os/os_time.h"
30 #include "tgsi/tgsi_text.h"
31
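/* Byte layout of one hardware query result slot, as consumed by the
 * result-resolve compute shader: offsets of the begin value, end value and
 * fence dword within the slot, plus the stride and count of per-RB
 * begin/end pairs (pair_count > 1 only for occlusion queries).
 * Filled in by r600_get_hw_query_params() below.
 */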
32 struct r600_hw_query_params {
33 unsigned start_offset;
34 unsigned end_offset;
35 unsigned fence_offset;
36 unsigned pair_stride;
37 unsigned pair_count;
38 };
39
40 /* Queries without buffer handling or suspend/resume. */
41 struct r600_query_sw {
42 struct r600_query b;
43
44 uint64_t begin_result;
45 uint64_t end_result;
46
47 uint64_t begin_time;
48 uint64_t end_time;
49
50 /* Fence for GPU_FINISHED. */
51 struct pipe_fence_handle *fence;
52 };
53
54 static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
55 struct r600_query *rquery)
56 {
57 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
58
59 rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
60 FREE(query);
61 }
62
63 static enum radeon_value_id winsys_id_from_type(unsigned type)
64 {
65 switch (type) {
66 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
67 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
68 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
69 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
70 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
71 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
72 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
73 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
74 case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
75 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
76 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
77 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
78 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
79 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
80 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
81 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
82 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
83 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
84 case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
85 default: unreachable("query type does not correspond to winsys id");
86 }
87 }
88
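/* SW queries snapshot a driver or winsys counter at begin and again at
 * end; get_result then returns the difference, optionally converted to
 * the expected units.
 */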
89 static bool r600_query_sw_begin(struct r600_common_context *rctx,
90 struct r600_query *rquery)
91 {
92 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
93 enum radeon_value_id ws_id;
94
95 switch(query->b.type) {
96 case PIPE_QUERY_TIMESTAMP_DISJOINT:
97 case PIPE_QUERY_GPU_FINISHED:
98 break;
99 case R600_QUERY_DRAW_CALLS:
100 query->begin_result = rctx->num_draw_calls;
101 break;
102 case R600_QUERY_PRIM_RESTART_CALLS:
103 query->begin_result = rctx->num_prim_restart_calls;
104 break;
105 case R600_QUERY_SPILL_DRAW_CALLS:
106 query->begin_result = rctx->num_spill_draw_calls;
107 break;
108 case R600_QUERY_COMPUTE_CALLS:
109 query->begin_result = rctx->num_compute_calls;
110 break;
111 case R600_QUERY_SPILL_COMPUTE_CALLS:
112 query->begin_result = rctx->num_spill_compute_calls;
113 break;
114 case R600_QUERY_DMA_CALLS:
115 query->begin_result = rctx->num_dma_calls;
116 break;
117 case R600_QUERY_CP_DMA_CALLS:
118 query->begin_result = rctx->num_cp_dma_calls;
119 break;
120 case R600_QUERY_NUM_VS_FLUSHES:
121 query->begin_result = rctx->num_vs_flushes;
122 break;
123 case R600_QUERY_NUM_PS_FLUSHES:
124 query->begin_result = rctx->num_ps_flushes;
125 break;
126 case R600_QUERY_NUM_CS_FLUSHES:
127 query->begin_result = rctx->num_cs_flushes;
128 break;
129 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
130 query->begin_result = rctx->num_cb_cache_flushes;
131 break;
132 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
133 query->begin_result = rctx->num_db_cache_flushes;
134 break;
135 case R600_QUERY_NUM_L2_INVALIDATES:
136 query->begin_result = rctx->num_L2_invalidates;
137 break;
138 case R600_QUERY_NUM_L2_WRITEBACKS:
139 query->begin_result = rctx->num_L2_writebacks;
140 break;
141 case R600_QUERY_NUM_RESIDENT_HANDLES:
142 query->begin_result = rctx->num_resident_handles;
143 break;
144 case R600_QUERY_TC_OFFLOADED_SLOTS:
145 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
146 break;
147 case R600_QUERY_TC_DIRECT_SLOTS:
148 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
149 break;
150 case R600_QUERY_TC_NUM_SYNCS:
151 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
152 break;
153 case R600_QUERY_REQUESTED_VRAM:
154 case R600_QUERY_REQUESTED_GTT:
155 case R600_QUERY_MAPPED_VRAM:
156 case R600_QUERY_MAPPED_GTT:
157 case R600_QUERY_VRAM_USAGE:
158 case R600_QUERY_VRAM_VIS_USAGE:
159 case R600_QUERY_GTT_USAGE:
160 case R600_QUERY_GPU_TEMPERATURE:
161 case R600_QUERY_CURRENT_GPU_SCLK:
162 case R600_QUERY_CURRENT_GPU_MCLK:
163 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
164 case R600_QUERY_NUM_MAPPED_BUFFERS:
165 query->begin_result = 0;
166 break;
167 case R600_QUERY_BUFFER_WAIT_TIME:
168 case R600_QUERY_NUM_GFX_IBS:
169 case R600_QUERY_NUM_SDMA_IBS:
170 case R600_QUERY_NUM_BYTES_MOVED:
171 case R600_QUERY_NUM_EVICTIONS:
172 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
173 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
174 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
175 break;
176 }
177 case R600_QUERY_GFX_BO_LIST_SIZE:
178 ws_id = winsys_id_from_type(query->b.type);
179 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
180 query->begin_time = rctx->ws->query_value(rctx->ws,
181 RADEON_NUM_GFX_IBS);
182 break;
183 case R600_QUERY_CS_THREAD_BUSY:
184 ws_id = winsys_id_from_type(query->b.type);
185 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
186 query->begin_time = os_time_get_nano();
187 break;
188 case R600_QUERY_GALLIUM_THREAD_BUSY:
189 query->begin_result =
190 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
191 query->begin_time = os_time_get_nano();
192 break;
193 case R600_QUERY_GPU_LOAD:
194 case R600_QUERY_GPU_SHADERS_BUSY:
195 case R600_QUERY_GPU_TA_BUSY:
196 case R600_QUERY_GPU_GDS_BUSY:
197 case R600_QUERY_GPU_VGT_BUSY:
198 case R600_QUERY_GPU_IA_BUSY:
199 case R600_QUERY_GPU_SX_BUSY:
200 case R600_QUERY_GPU_WD_BUSY:
201 case R600_QUERY_GPU_BCI_BUSY:
202 case R600_QUERY_GPU_SC_BUSY:
203 case R600_QUERY_GPU_PA_BUSY:
204 case R600_QUERY_GPU_DB_BUSY:
205 case R600_QUERY_GPU_CP_BUSY:
206 case R600_QUERY_GPU_CB_BUSY:
207 case R600_QUERY_GPU_SDMA_BUSY:
208 case R600_QUERY_GPU_PFP_BUSY:
209 case R600_QUERY_GPU_MEQ_BUSY:
210 case R600_QUERY_GPU_ME_BUSY:
211 case R600_QUERY_GPU_SURF_SYNC_BUSY:
212 case R600_QUERY_GPU_DMA_BUSY:
213 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
214 case R600_QUERY_GPU_CE_BUSY:
215 query->begin_result = r600_begin_counter(rctx->screen,
216 query->b.type);
217 break;
218 case R600_QUERY_NUM_COMPILATIONS:
219 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
220 break;
221 case R600_QUERY_NUM_SHADERS_CREATED:
222 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
223 break;
224 case R600_QUERY_NUM_SHADER_CACHE_HITS:
225 query->begin_result =
226 p_atomic_read(&rctx->screen->num_shader_cache_hits);
227 break;
228 case R600_QUERY_GPIN_ASIC_ID:
229 case R600_QUERY_GPIN_NUM_SIMD:
230 case R600_QUERY_GPIN_NUM_RB:
231 case R600_QUERY_GPIN_NUM_SPI:
232 case R600_QUERY_GPIN_NUM_SE:
233 break;
234 default:
235 unreachable("r600_query_sw_begin: bad query type");
236 }
237
238 return true;
239 }
240
241 static bool r600_query_sw_end(struct r600_common_context *rctx,
242 struct r600_query *rquery)
243 {
244 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
245 enum radeon_value_id ws_id;
246
247 switch(query->b.type) {
248 case PIPE_QUERY_TIMESTAMP_DISJOINT:
249 break;
250 case PIPE_QUERY_GPU_FINISHED:
251 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
252 break;
253 case R600_QUERY_DRAW_CALLS:
254 query->end_result = rctx->num_draw_calls;
255 break;
256 case R600_QUERY_PRIM_RESTART_CALLS:
257 query->end_result = rctx->num_prim_restart_calls;
258 break;
259 case R600_QUERY_SPILL_DRAW_CALLS:
260 query->end_result = rctx->num_spill_draw_calls;
261 break;
262 case R600_QUERY_COMPUTE_CALLS:
263 query->end_result = rctx->num_compute_calls;
264 break;
265 case R600_QUERY_SPILL_COMPUTE_CALLS:
266 query->end_result = rctx->num_spill_compute_calls;
267 break;
268 case R600_QUERY_DMA_CALLS:
269 query->end_result = rctx->num_dma_calls;
270 break;
271 case R600_QUERY_CP_DMA_CALLS:
272 query->end_result = rctx->num_cp_dma_calls;
273 break;
274 case R600_QUERY_NUM_VS_FLUSHES:
275 query->end_result = rctx->num_vs_flushes;
276 break;
277 case R600_QUERY_NUM_PS_FLUSHES:
278 query->end_result = rctx->num_ps_flushes;
279 break;
280 case R600_QUERY_NUM_CS_FLUSHES:
281 query->end_result = rctx->num_cs_flushes;
282 break;
283 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
284 query->end_result = rctx->num_cb_cache_flushes;
285 break;
286 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
287 query->end_result = rctx->num_db_cache_flushes;
288 break;
289 case R600_QUERY_NUM_L2_INVALIDATES:
290 query->end_result = rctx->num_L2_invalidates;
291 break;
292 case R600_QUERY_NUM_L2_WRITEBACKS:
293 query->end_result = rctx->num_L2_writebacks;
294 break;
295 case R600_QUERY_NUM_RESIDENT_HANDLES:
296 query->end_result = rctx->num_resident_handles;
297 break;
298 case R600_QUERY_TC_OFFLOADED_SLOTS:
299 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
300 break;
301 case R600_QUERY_TC_DIRECT_SLOTS:
302 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
303 break;
304 case R600_QUERY_TC_NUM_SYNCS:
305 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
306 break;
307 case R600_QUERY_REQUESTED_VRAM:
308 case R600_QUERY_REQUESTED_GTT:
309 case R600_QUERY_MAPPED_VRAM:
310 case R600_QUERY_MAPPED_GTT:
311 case R600_QUERY_VRAM_USAGE:
312 case R600_QUERY_VRAM_VIS_USAGE:
313 case R600_QUERY_GTT_USAGE:
314 case R600_QUERY_GPU_TEMPERATURE:
315 case R600_QUERY_CURRENT_GPU_SCLK:
316 case R600_QUERY_CURRENT_GPU_MCLK:
317 case R600_QUERY_BUFFER_WAIT_TIME:
318 case R600_QUERY_NUM_MAPPED_BUFFERS:
319 case R600_QUERY_NUM_GFX_IBS:
320 case R600_QUERY_NUM_SDMA_IBS:
321 case R600_QUERY_NUM_BYTES_MOVED:
322 case R600_QUERY_NUM_EVICTIONS:
323 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
324 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
325 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
326 break;
327 }
328 case R600_QUERY_GFX_BO_LIST_SIZE:
329 ws_id = winsys_id_from_type(query->b.type);
330 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
331 query->end_time = rctx->ws->query_value(rctx->ws,
332 RADEON_NUM_GFX_IBS);
333 break;
334 case R600_QUERY_CS_THREAD_BUSY:
335 ws_id = winsys_id_from_type(query->b.type);
336 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
337 query->end_time = os_time_get_nano();
338 break;
339 case R600_QUERY_GALLIUM_THREAD_BUSY:
340 query->end_result =
341 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
342 query->end_time = os_time_get_nano();
343 break;
344 case R600_QUERY_GPU_LOAD:
345 case R600_QUERY_GPU_SHADERS_BUSY:
346 case R600_QUERY_GPU_TA_BUSY:
347 case R600_QUERY_GPU_GDS_BUSY:
348 case R600_QUERY_GPU_VGT_BUSY:
349 case R600_QUERY_GPU_IA_BUSY:
350 case R600_QUERY_GPU_SX_BUSY:
351 case R600_QUERY_GPU_WD_BUSY:
352 case R600_QUERY_GPU_BCI_BUSY:
353 case R600_QUERY_GPU_SC_BUSY:
354 case R600_QUERY_GPU_PA_BUSY:
355 case R600_QUERY_GPU_DB_BUSY:
356 case R600_QUERY_GPU_CP_BUSY:
357 case R600_QUERY_GPU_CB_BUSY:
358 case R600_QUERY_GPU_SDMA_BUSY:
359 case R600_QUERY_GPU_PFP_BUSY:
360 case R600_QUERY_GPU_MEQ_BUSY:
361 case R600_QUERY_GPU_ME_BUSY:
362 case R600_QUERY_GPU_SURF_SYNC_BUSY:
363 case R600_QUERY_GPU_DMA_BUSY:
364 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
365 case R600_QUERY_GPU_CE_BUSY:
366 query->end_result = r600_end_counter(rctx->screen,
367 query->b.type,
368 query->begin_result);
369 query->begin_result = 0;
370 break;
371 case R600_QUERY_NUM_COMPILATIONS:
372 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
373 break;
374 case R600_QUERY_NUM_SHADERS_CREATED:
375 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
376 break;
377 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
378 query->end_result = rctx->last_tex_ps_draw_ratio;
379 break;
380 case R600_QUERY_NUM_SHADER_CACHE_HITS:
381 query->end_result =
382 p_atomic_read(&rctx->screen->num_shader_cache_hits);
383 break;
384 case R600_QUERY_GPIN_ASIC_ID:
385 case R600_QUERY_GPIN_NUM_SIMD:
386 case R600_QUERY_GPIN_NUM_RB:
387 case R600_QUERY_GPIN_NUM_SPI:
388 case R600_QUERY_GPIN_NUM_SE:
389 break;
390 default:
391 unreachable("r600_query_sw_end: bad query type");
392 }
393
394 return true;
395 }
396
397 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
398 struct r600_query *rquery,
399 bool wait,
400 union pipe_query_result *result)
401 {
402 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
403
404 switch (query->b.type) {
405 case PIPE_QUERY_TIMESTAMP_DISJOINT:
406 /* Convert from cycles per millisecond to cycles per second (Hz). */
407 result->timestamp_disjoint.frequency =
408 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
409 result->timestamp_disjoint.disjoint = false;
410 return true;
411 case PIPE_QUERY_GPU_FINISHED: {
412 struct pipe_screen *screen = rctx->b.screen;
413 struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
414
415 result->b = screen->fence_finish(screen, ctx, query->fence,
416 wait ? PIPE_TIMEOUT_INFINITE : 0);
417 return result->b;
418 }
419
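/* The winsys accumulates the total BO list size across all gfx IBs, and
 * begin/end also snapshot the gfx IB count, so the quotient of the two
 * deltas is the average BO list size per IB over the measured interval.
 */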
420 case R600_QUERY_GFX_BO_LIST_SIZE:
421 result->u64 = (query->end_result - query->begin_result) /
422 (query->end_time - query->begin_time);
423 return true;
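/* Thread-busy queries divide the busy-time delta by the wall-clock delta
 * and scale the result to a percentage.
 */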
424 case R600_QUERY_CS_THREAD_BUSY:
425 case R600_QUERY_GALLIUM_THREAD_BUSY:
426 result->u64 = (query->end_result - query->begin_result) * 100 /
427 (query->end_time - query->begin_time);
428 return true;
429 case R600_QUERY_GPIN_ASIC_ID:
430 result->u32 = 0;
431 return true;
432 case R600_QUERY_GPIN_NUM_SIMD:
433 result->u32 = rctx->screen->info.num_good_compute_units;
434 return true;
435 case R600_QUERY_GPIN_NUM_RB:
436 result->u32 = rctx->screen->info.num_render_backends;
437 return true;
438 case R600_QUERY_GPIN_NUM_SPI:
439 result->u32 = 1; /* all supported chips have one SPI per SE */
440 return true;
441 case R600_QUERY_GPIN_NUM_SE:
442 result->u32 = rctx->screen->info.max_se;
443 return true;
444 }
445
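/* All remaining query types report the plain difference of the two
 * snapshots, with a few unit conversions applied below.
 */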
446 result->u64 = query->end_result - query->begin_result;
447
448 switch (query->b.type) {
449 case R600_QUERY_BUFFER_WAIT_TIME:
450 case R600_QUERY_GPU_TEMPERATURE:
451 result->u64 /= 1000;
452 break;
453 case R600_QUERY_CURRENT_GPU_SCLK:
454 case R600_QUERY_CURRENT_GPU_MCLK:
455 result->u64 *= 1000000;
456 break;
457 }
458
459 return true;
460 }
461
462
463 static struct r600_query_ops sw_query_ops = {
464 .destroy = r600_query_sw_destroy,
465 .begin = r600_query_sw_begin,
466 .end = r600_query_sw_end,
467 .get_result = r600_query_sw_get_result,
468 .get_result_resource = NULL
469 };
470
471 static struct pipe_query *r600_query_sw_create(unsigned query_type)
472 {
473 struct r600_query_sw *query;
474
475 query = CALLOC_STRUCT(r600_query_sw);
476 if (!query)
477 return NULL;
478
479 query->b.type = query_type;
480 query->b.ops = &sw_query_ops;
481
482 return (struct pipe_query *)query;
483 }
484
485 void r600_query_hw_destroy(struct r600_common_screen *rscreen,
486 struct r600_query *rquery)
487 {
488 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
489 struct r600_query_buffer *prev = query->buffer.previous;
490
491 /* Release all query buffers. */
492 while (prev) {
493 struct r600_query_buffer *qbuf = prev;
494 prev = prev->previous;
495 r600_resource_reference(&qbuf->buf, NULL);
496 FREE(qbuf);
497 }
498
499 r600_resource_reference(&query->buffer.buf, NULL);
500 FREE(rquery);
501 }
502
503 static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
504 struct r600_query_hw *query)
505 {
506 unsigned buf_size = MAX2(query->result_size,
507 rscreen->info.min_alloc_size);
508
509 /* Queries are normally read by the CPU after
510 * being written by the GPU, so staging is probably a good
511 * usage pattern.
512 */
513 struct r600_resource *buf = (struct r600_resource*)
514 pipe_buffer_create(&rscreen->b, 0,
515 PIPE_USAGE_STAGING, buf_size);
516 if (!buf)
517 return NULL;
518
519 if (!query->ops->prepare_buffer(rscreen, query, buf)) {
520 r600_resource_reference(&buf, NULL);
521 return NULL;
522 }
523
524 return buf;
525 }
526
527 static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
528 struct r600_query_hw *query,
529 struct r600_resource *buffer)
530 {
531 /* Callers ensure that the buffer is currently unused by the GPU. */
532 uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
533 PIPE_TRANSFER_WRITE |
534 PIPE_TRANSFER_UNSYNCHRONIZED);
535 if (!results)
536 return false;
537
538 memset(results, 0, buffer->b.b.width0);
539
540 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
541 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
542 unsigned max_rbs = rscreen->info.num_render_backends;
543 unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
544 unsigned num_results;
545 unsigned i, j;
546
547 /* Set top bits for unused backends: the top bit marks a result as written, and disabled RBs never write, so pre-set it to keep them from making results look unavailable. */
548 num_results = buffer->b.b.width0 / query->result_size;
549 for (j = 0; j < num_results; j++) {
550 for (i = 0; i < max_rbs; i++) {
551 if (!(enabled_rb_mask & (1<<i))) {
552 results[(i * 4)+1] = 0x80000000;
553 results[(i * 4)+3] = 0x80000000;
554 }
555 }
556 results += 4 * max_rbs;
557 }
558 }
559
560 return true;
561 }
562
563 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
564 struct r600_query *rquery,
565 bool wait,
566 enum pipe_query_value_type result_type,
567 int index,
568 struct pipe_resource *resource,
569 unsigned offset);
570
571 static struct r600_query_ops query_hw_ops = {
572 .destroy = r600_query_hw_destroy,
573 .begin = r600_query_hw_begin,
574 .end = r600_query_hw_end,
575 .get_result = r600_query_hw_get_result,
576 .get_result_resource = r600_query_hw_get_result_resource,
577 };
578
579 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
580 struct r600_query_hw *query,
581 struct r600_resource *buffer,
582 uint64_t va);
583 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
584 struct r600_query_hw *query,
585 struct r600_resource *buffer,
586 uint64_t va);
587 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
588 struct r600_query_hw *, void *buffer,
589 union pipe_query_result *result);
590 static void r600_query_hw_clear_result(struct r600_query_hw *,
591 union pipe_query_result *);
592
593 static struct r600_query_hw_ops query_hw_default_hw_ops = {
594 .prepare_buffer = r600_query_hw_prepare_buffer,
595 .emit_start = r600_query_hw_do_emit_start,
596 .emit_stop = r600_query_hw_do_emit_stop,
597 .clear_result = r600_query_hw_clear_result,
598 .add_result = r600_query_hw_add_result,
599 };
600
601 bool r600_query_hw_init(struct r600_common_screen *rscreen,
602 struct r600_query_hw *query)
603 {
604 query->buffer.buf = r600_new_query_buffer(rscreen, query);
605 if (!query->buffer.buf)
606 return false;
607
608 return true;
609 }
610
611 static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
612 unsigned query_type,
613 unsigned index)
614 {
615 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
616 if (!query)
617 return NULL;
618
619 query->b.type = query_type;
620 query->b.ops = &query_hw_ops;
621 query->ops = &query_hw_default_hw_ops;
622
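/* result_size is the GPU-visible footprint of one begin/end sample;
 * num_cs_dw_begin/end reserve command stream space for the packets
 * emitted by emit_start/emit_stop.
 */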
623 switch (query_type) {
624 case PIPE_QUERY_OCCLUSION_COUNTER:
625 case PIPE_QUERY_OCCLUSION_PREDICATE:
626 query->result_size = 16 * rscreen->info.num_render_backends;
627 query->result_size += 16; /* for the fence + alignment */
628 query->num_cs_dw_begin = 6;
629 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
630 break;
631 case PIPE_QUERY_TIME_ELAPSED:
632 query->result_size = 24;
633 query->num_cs_dw_begin = 8;
634 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
635 break;
636 case PIPE_QUERY_TIMESTAMP:
637 query->result_size = 16;
638 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
639 query->flags = R600_QUERY_HW_FLAG_NO_START;
640 break;
641 case PIPE_QUERY_PRIMITIVES_EMITTED:
642 case PIPE_QUERY_PRIMITIVES_GENERATED:
643 case PIPE_QUERY_SO_STATISTICS:
644 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
645 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
646 query->result_size = 32;
647 query->num_cs_dw_begin = 6;
648 query->num_cs_dw_end = 6;
649 query->stream = index;
650 break;
651 case PIPE_QUERY_PIPELINE_STATISTICS:
652 /* 11 values on EG, 8 on R600. */
653 query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
654 query->result_size += 8; /* for the fence + alignment */
655 query->num_cs_dw_begin = 6;
656 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
657 break;
658 default:
659 assert(0);
660 FREE(query);
661 return NULL;
662 }
663
664 if (!r600_query_hw_init(rscreen, query)) {
665 FREE(query);
666 return NULL;
667 }
668
669 return (struct pipe_query *)query;
670 }
671
672 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
673 unsigned type, int diff)
674 {
675 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
676 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
677 bool old_enable = rctx->num_occlusion_queries != 0;
678 bool old_perfect_enable =
679 rctx->num_perfect_occlusion_queries != 0;
680 bool enable, perfect_enable;
681
682 rctx->num_occlusion_queries += diff;
683 assert(rctx->num_occlusion_queries >= 0);
684
685 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
686 rctx->num_perfect_occlusion_queries += diff;
687 assert(rctx->num_perfect_occlusion_queries >= 0);
688 }
689
690 enable = rctx->num_occlusion_queries != 0;
691 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
692
693 if (enable != old_enable || perfect_enable != old_perfect_enable) {
694 rctx->set_occlusion_query_state(&rctx->b, enable);
695 }
696 }
697 }
698
699 static unsigned event_type_for_stream(struct r600_query_hw *query)
700 {
701 switch (query->stream) {
702 default:
703 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
704 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
705 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
706 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
707 }
708 }
709
710 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
711 struct r600_query_hw *query,
712 struct r600_resource *buffer,
713 uint64_t va)
714 {
715 struct radeon_winsys_cs *cs = ctx->gfx.cs;
716
717 switch (query->b.type) {
718 case PIPE_QUERY_OCCLUSION_COUNTER:
719 case PIPE_QUERY_OCCLUSION_PREDICATE:
720 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
721 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
722 radeon_emit(cs, va);
723 radeon_emit(cs, va >> 32);
724 break;
725 case PIPE_QUERY_PRIMITIVES_EMITTED:
726 case PIPE_QUERY_PRIMITIVES_GENERATED:
727 case PIPE_QUERY_SO_STATISTICS:
728 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
729 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
730 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
731 radeon_emit(cs, va);
732 radeon_emit(cs, va >> 32);
733 break;
734 case PIPE_QUERY_TIME_ELAPSED:
735 if (ctx->chip_class >= SI) {
736 /* Write the timestamp from the CP without waiting for
737 * outstanding draws (top-of-pipe).
738 */
739 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
740 radeon_emit(cs, COPY_DATA_COUNT_SEL |
741 COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
742 COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
743 radeon_emit(cs, 0);
744 radeon_emit(cs, 0);
745 radeon_emit(cs, va);
746 radeon_emit(cs, va >> 32);
747 } else {
748 /* Write the timestamp after the last draw is done
749 * (bottom-of-pipe).
750 */
751 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
752 0, 3, NULL, va, 0, 0);
753 }
754 break;
755 case PIPE_QUERY_PIPELINE_STATISTICS:
756 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
757 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
758 radeon_emit(cs, va);
759 radeon_emit(cs, va >> 32);
760 break;
761 default:
762 assert(0);
763 }
764 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
765 RADEON_PRIO_QUERY);
766 }
767
768 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
769 struct r600_query_hw *query)
770 {
771 uint64_t va;
772
773 if (!query->buffer.buf)
774 return; // previous buffer allocation failure
775
776 r600_update_occlusion_query_state(ctx, query->b.type, 1);
777 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
778
779 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
780 true);
781
782 /* Get a new query buffer if needed; the old one is kept on the 'previous' chain so its results can still be read back. */
783 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
784 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
785 *qbuf = query->buffer;
786 query->buffer.results_end = 0;
787 query->buffer.previous = qbuf;
788 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
789 if (!query->buffer.buf)
790 return;
791 }
792
793 /* emit begin query */
794 va = query->buffer.buf->gpu_address + query->buffer.results_end;
795
796 query->ops->emit_start(ctx, query, query->buffer.buf, va);
797
798 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
799 }
800
801 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
802 struct r600_query_hw *query,
803 struct r600_resource *buffer,
804 uint64_t va)
805 {
806 struct radeon_winsys_cs *cs = ctx->gfx.cs;
807 uint64_t fence_va = 0;
808
809 switch (query->b.type) {
810 case PIPE_QUERY_OCCLUSION_COUNTER:
811 case PIPE_QUERY_OCCLUSION_PREDICATE:
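/* Each RB writes a 16-byte begin/end pair; the stop sample goes into
 * the second half of the pair, hence the +8.
 */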
812 va += 8;
813 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
814 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
815 radeon_emit(cs, va);
816 radeon_emit(cs, va >> 32);
817
818 fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
819 break;
820 case PIPE_QUERY_PRIMITIVES_EMITTED:
821 case PIPE_QUERY_PRIMITIVES_GENERATED:
822 case PIPE_QUERY_SO_STATISTICS:
823 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
824 va += query->result_size/2;
825 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
826 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
827 radeon_emit(cs, va);
828 radeon_emit(cs, va >> 32);
829 break;
830 case PIPE_QUERY_TIME_ELAPSED:
831 va += 8;
832 /* fall through */
833 case PIPE_QUERY_TIMESTAMP:
834 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
835 0, 3, NULL, va, 0, 0);
836 fence_va = va + 8;
837 break;
838 case PIPE_QUERY_PIPELINE_STATISTICS: {
839 unsigned sample_size = (query->result_size - 8) / 2;
840
841 va += sample_size;
842 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
843 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
844 radeon_emit(cs, va);
845 radeon_emit(cs, va >> 32);
846
847 fence_va = va + sample_size;
848 break;
849 }
850 default:
851 assert(0);
852 }
853 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
854 RADEON_PRIO_QUERY);
855
856 if (fence_va)
857 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
858 query->buffer.buf, fence_va, 0, 0x80000000);
859 }
860
861 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
862 struct r600_query_hw *query)
863 {
864 uint64_t va;
865
866 if (!query->buffer.buf)
867 return; // previous buffer allocation failure
868
869 /* Queries that have a begin already reserved CS space for this in emit_start. */
870 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
871 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
872 }
873
874 /* emit end query */
875 va = query->buffer.buf->gpu_address + query->buffer.results_end;
876
877 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
878
879 query->buffer.results_end += query->result_size;
880
881 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
882 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
883
884 r600_update_occlusion_query_state(ctx, query->b.type, -1);
885 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
886 }
887
888 static void r600_emit_query_predication(struct r600_common_context *ctx,
889 struct r600_atom *atom)
890 {
891 struct radeon_winsys_cs *cs = ctx->gfx.cs;
892 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
893 struct r600_query_buffer *qbuf;
894 uint32_t op;
895 bool flag_wait;
896
897 if (!query)
898 return;
899
900 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
901 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
902
903 switch (query->b.type) {
904 case PIPE_QUERY_OCCLUSION_COUNTER:
905 case PIPE_QUERY_OCCLUSION_PREDICATE:
906 op = PRED_OP(PREDICATION_OP_ZPASS);
907 break;
908 case PIPE_QUERY_PRIMITIVES_EMITTED:
909 case PIPE_QUERY_PRIMITIVES_GENERATED:
910 case PIPE_QUERY_SO_STATISTICS:
911 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
912 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
913 break;
914 default:
915 assert(0);
916 return;
917 }
918
919 /* if true then invert, see GL_ARB_conditional_render_inverted */
920 if (ctx->render_cond_invert)
921 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
922 else
923 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
924
925 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
926
927 /* Emit one SET_PREDICATION packet per results block. */
928 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
929 unsigned results_base = 0;
930 uint64_t va_base = qbuf->buf->gpu_address;
931
932 while (results_base < qbuf->results_end) {
933 uint64_t va = va_base + results_base;
934
935 if (ctx->chip_class >= GFX9) {
936 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
937 radeon_emit(cs, op);
938 radeon_emit(cs, va);
939 radeon_emit(cs, va >> 32);
940 } else {
941 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
942 radeon_emit(cs, va);
943 radeon_emit(cs, op | ((va >> 32) & 0xFF));
944 }
945 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
946 RADEON_PRIO_QUERY);
947 results_base += query->result_size;
948
949 /* set CONTINUE bit for all packets except the first */
950 op |= PREDICATION_CONTINUE;
951 }
952 }
953 }
954
955 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
956 {
957 struct r600_common_screen *rscreen =
958 (struct r600_common_screen *)ctx->screen;
959
960 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
961 query_type == PIPE_QUERY_GPU_FINISHED ||
962 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
963 return r600_query_sw_create(query_type);
964
965 return r600_query_hw_create(rscreen, query_type, index);
966 }
967
968 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
969 {
970 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
971 struct r600_query *rquery = (struct r600_query *)query;
972
973 rquery->ops->destroy(rctx->screen, rquery);
974 }
975
976 static boolean r600_begin_query(struct pipe_context *ctx,
977 struct pipe_query *query)
978 {
979 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
980 struct r600_query *rquery = (struct r600_query *)query;
981
982 return rquery->ops->begin(rctx, rquery);
983 }
984
985 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
986 struct r600_query_hw *query)
987 {
988 struct r600_query_buffer *prev = query->buffer.previous;
989
990 /* Discard the old query buffers. */
991 while (prev) {
992 struct r600_query_buffer *qbuf = prev;
993 prev = prev->previous;
994 r600_resource_reference(&qbuf->buf, NULL);
995 FREE(qbuf);
996 }
997
998 query->buffer.results_end = 0;
999 query->buffer.previous = NULL;
1000
1001 /* Obtain a new buffer if the current one can't be mapped without a stall. */
1002 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1003 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1004 r600_resource_reference(&query->buffer.buf, NULL);
1005 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
1006 } else {
1007 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
1008 r600_resource_reference(&query->buffer.buf, NULL);
1009 }
1010 }
1011
1012 bool r600_query_hw_begin(struct r600_common_context *rctx,
1013 struct r600_query *rquery)
1014 {
1015 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1016
1017 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
1018 assert(0);
1019 return false;
1020 }
1021
1022 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
1023 r600_query_hw_reset_buffers(rctx, query);
1024
1025 r600_query_hw_emit_start(rctx, query);
1026 if (!query->buffer.buf)
1027 return false;
1028
1029 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1030 return true;
1031 }
1032
1033 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1034 {
1035 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1036 struct r600_query *rquery = (struct r600_query *)query;
1037
1038 return rquery->ops->end(rctx, rquery);
1039 }
1040
1041 bool r600_query_hw_end(struct r600_common_context *rctx,
1042 struct r600_query *rquery)
1043 {
1044 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1045
1046 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1047 r600_query_hw_reset_buffers(rctx, query);
1048
1049 r600_query_hw_emit_stop(rctx, query);
1050
1051 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1052 LIST_DELINIT(&query->list);
1053
1054 if (!query->buffer.buf)
1055 return false;
1056
1057 return true;
1058 }
1059
1060 static void r600_get_hw_query_params(struct r600_common_context *rctx,
1061 struct r600_query_hw *rquery, int index,
1062 struct r600_hw_query_params *params)
1063 {
1064 unsigned max_rbs = rctx->screen->info.num_render_backends;
1065
1066 params->pair_stride = 0;
1067 params->pair_count = 1;
1068
1069 switch (rquery->b.type) {
1070 case PIPE_QUERY_OCCLUSION_COUNTER:
1071 case PIPE_QUERY_OCCLUSION_PREDICATE:
1072 params->start_offset = 0;
1073 params->end_offset = 8;
1074 params->fence_offset = max_rbs * 16;
1075 params->pair_stride = 16;
1076 params->pair_count = max_rbs;
1077 break;
1078 case PIPE_QUERY_TIME_ELAPSED:
1079 params->start_offset = 0;
1080 params->end_offset = 8;
1081 params->fence_offset = 16;
1082 break;
1083 case PIPE_QUERY_TIMESTAMP:
1084 params->start_offset = 0;
1085 params->end_offset = 0;
1086 params->fence_offset = 8;
1087 break;
1088 case PIPE_QUERY_PRIMITIVES_EMITTED:
1089 params->start_offset = 8;
1090 params->end_offset = 24;
1091 params->fence_offset = params->end_offset + 4;
1092 break;
1093 case PIPE_QUERY_PRIMITIVES_GENERATED:
1094 params->start_offset = 0;
1095 params->end_offset = 16;
1096 params->fence_offset = params->end_offset + 4;
1097 break;
1098 case PIPE_QUERY_SO_STATISTICS:
1099 params->start_offset = 8 - index * 8;
1100 params->end_offset = 24 - index * 8;
1101 params->fence_offset = params->end_offset + 4;
1102 break;
1103 case PIPE_QUERY_PIPELINE_STATISTICS:
1104 {
1105 /* Offsets apply to EG+ */
1106 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1107 params->start_offset = offsets[index];
1108 params->end_offset = 88 + offsets[index];
1109 params->fence_offset = 2 * 88;
1110 break;
1111 }
1112 default:
1113 unreachable("r600_get_hw_query_params unsupported");
1114 }
1115 }
1116
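/* Read a 64-bit begin/end pair from the result buffer and return the
 * difference. For counters where the hardware sets bit 63 once the value
 * has been written (per-RB ZPASS results, streamout stats), passing
 * test_status_bit ignores pairs that aren't complete yet.
 */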
1117 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
1118 bool test_status_bit)
1119 {
1120 uint32_t *current_result = (uint32_t*)map;
1121 uint64_t start, end;
1122
1123 start = (uint64_t)current_result[start_index] |
1124 (uint64_t)current_result[start_index+1] << 32;
1125 end = (uint64_t)current_result[end_index] |
1126 (uint64_t)current_result[end_index+1] << 32;
1127
1128 if (!test_status_bit ||
1129 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1130 return end - start;
1131 }
1132 return 0;
1133 }
1134
1135 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
1136 struct r600_query_hw *query,
1137 void *buffer,
1138 union pipe_query_result *result)
1139 {
1140 unsigned max_rbs = rscreen->info.num_render_backends;
1141
1142 switch (query->b.type) {
1143 case PIPE_QUERY_OCCLUSION_COUNTER: {
1144 for (unsigned i = 0; i < max_rbs; ++i) {
1145 unsigned results_base = i * 16;
1146 result->u64 +=
1147 r600_query_read_result(buffer + results_base, 0, 2, true);
1148 }
1149 break;
1150 }
1151 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1152 for (unsigned i = 0; i < max_rbs; ++i) {
1153 unsigned results_base = i * 16;
1154 result->b = result->b ||
1155 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1156 }
1157 break;
1158 }
1159 case PIPE_QUERY_TIME_ELAPSED:
1160 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1161 break;
1162 case PIPE_QUERY_TIMESTAMP:
1163 result->u64 = *(uint64_t*)buffer;
1164 break;
1165 case PIPE_QUERY_PRIMITIVES_EMITTED:
1166 /* SAMPLE_STREAMOUTSTATS stores this structure:
1167 * {
1168 * u64 NumPrimitivesWritten;
1169 * u64 PrimitiveStorageNeeded;
1170 * }
1171 * We only need NumPrimitivesWritten here. */
1172 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1173 break;
1174 case PIPE_QUERY_PRIMITIVES_GENERATED:
1175 /* Here we read PrimitiveStorageNeeded. */
1176 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1177 break;
1178 case PIPE_QUERY_SO_STATISTICS:
1179 result->so_statistics.num_primitives_written +=
1180 r600_query_read_result(buffer, 2, 6, true);
1181 result->so_statistics.primitives_storage_needed +=
1182 r600_query_read_result(buffer, 0, 4, true);
1183 break;
1184 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1185 result->b = result->b ||
1186 r600_query_read_result(buffer, 2, 6, true) !=
1187 r600_query_read_result(buffer, 0, 4, true);
1188 break;
1189 case PIPE_QUERY_PIPELINE_STATISTICS:
1190 if (rscreen->chip_class >= EVERGREEN) {
1191 result->pipeline_statistics.ps_invocations +=
1192 r600_query_read_result(buffer, 0, 22, false);
1193 result->pipeline_statistics.c_primitives +=
1194 r600_query_read_result(buffer, 2, 24, false);
1195 result->pipeline_statistics.c_invocations +=
1196 r600_query_read_result(buffer, 4, 26, false);
1197 result->pipeline_statistics.vs_invocations +=
1198 r600_query_read_result(buffer, 6, 28, false);
1199 result->pipeline_statistics.gs_invocations +=
1200 r600_query_read_result(buffer, 8, 30, false);
1201 result->pipeline_statistics.gs_primitives +=
1202 r600_query_read_result(buffer, 10, 32, false);
1203 result->pipeline_statistics.ia_primitives +=
1204 r600_query_read_result(buffer, 12, 34, false);
1205 result->pipeline_statistics.ia_vertices +=
1206 r600_query_read_result(buffer, 14, 36, false);
1207 result->pipeline_statistics.hs_invocations +=
1208 r600_query_read_result(buffer, 16, 38, false);
1209 result->pipeline_statistics.ds_invocations +=
1210 r600_query_read_result(buffer, 18, 40, false);
1211 result->pipeline_statistics.cs_invocations +=
1212 r600_query_read_result(buffer, 20, 42, false);
1213 } else {
1214 result->pipeline_statistics.ps_invocations +=
1215 r600_query_read_result(buffer, 0, 16, false);
1216 result->pipeline_statistics.c_primitives +=
1217 r600_query_read_result(buffer, 2, 18, false);
1218 result->pipeline_statistics.c_invocations +=
1219 r600_query_read_result(buffer, 4, 20, false);
1220 result->pipeline_statistics.vs_invocations +=
1221 r600_query_read_result(buffer, 6, 22, false);
1222 result->pipeline_statistics.gs_invocations +=
1223 r600_query_read_result(buffer, 8, 24, false);
1224 result->pipeline_statistics.gs_primitives +=
1225 r600_query_read_result(buffer, 10, 26, false);
1226 result->pipeline_statistics.ia_primitives +=
1227 r600_query_read_result(buffer, 12, 28, false);
1228 result->pipeline_statistics.ia_vertices +=
1229 r600_query_read_result(buffer, 14, 30, false);
1230 }
1231 #if 0 /* for testing */
1232 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1233 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1234 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1235 result->pipeline_statistics.ia_vertices,
1236 result->pipeline_statistics.ia_primitives,
1237 result->pipeline_statistics.vs_invocations,
1238 result->pipeline_statistics.hs_invocations,
1239 result->pipeline_statistics.ds_invocations,
1240 result->pipeline_statistics.gs_invocations,
1241 result->pipeline_statistics.gs_primitives,
1242 result->pipeline_statistics.c_invocations,
1243 result->pipeline_statistics.c_primitives,
1244 result->pipeline_statistics.ps_invocations,
1245 result->pipeline_statistics.cs_invocations);
1246 #endif
1247 break;
1248 default:
1249 assert(0);
1250 }
1251 }
1252
1253 static boolean r600_get_query_result(struct pipe_context *ctx,
1254 struct pipe_query *query, boolean wait,
1255 union pipe_query_result *result)
1256 {
1257 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1258 struct r600_query *rquery = (struct r600_query *)query;
1259
1260 return rquery->ops->get_result(rctx, rquery, wait, result);
1261 }
1262
1263 static void r600_get_query_result_resource(struct pipe_context *ctx,
1264 struct pipe_query *query,
1265 boolean wait,
1266 enum pipe_query_value_type result_type,
1267 int index,
1268 struct pipe_resource *resource,
1269 unsigned offset)
1270 {
1271 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1272 struct r600_query *rquery = (struct r600_query *)query;
1273
1274 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1275 resource, offset);
1276 }
1277
1278 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1279 union pipe_query_result *result)
1280 {
1281 util_query_clear_result(result, query->b.type);
1282 }
1283
1284 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1285 struct r600_query *rquery,
1286 bool wait, union pipe_query_result *result)
1287 {
1288 struct r600_common_screen *rscreen = rctx->screen;
1289 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1290 struct r600_query_buffer *qbuf;
1291
1292 query->ops->clear_result(query, result);
1293
1294 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1295 unsigned usage = PIPE_TRANSFER_READ |
1296 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1297 unsigned results_base = 0;
1298 void *map;
1299
1300 if (rquery->b.flushed)
1301 map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1302 else
1303 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);
1304
1305 if (!map)
1306 return false;
1307
1308 while (results_base != qbuf->results_end) {
1309 query->ops->add_result(rscreen, query, map + results_base,
1310 result);
1311 results_base += query->result_size;
1312 }
1313 }
1314
1315 /* Convert the ticks to nanoseconds; clock_crystal_freq is in kHz (ticks per millisecond). */
1316 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1317 rquery->type == PIPE_QUERY_TIMESTAMP) {
1318 result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
1319 }
1320 return true;
1321 }
1322
1323 /* Create the compute shader that is used to collect the results.
1324 *
1325 * One compute grid with a single thread is launched for every query result
1326 * buffer. The thread (optionally) reads a previous summary buffer, then
1327 * accumulates data from the query result buffer, and writes the result either
1328 * to a summary buffer to be consumed by the next grid invocation or to the
1329 * user-supplied buffer.
1330 *
1331 * Data layout:
1332 *
1333 * CONST
1334 * 0.x = end_offset
1335 * 0.y = result_stride
1336 * 0.z = result_count
1337 * 0.w = bit field:
1338 * 1: read previously accumulated values
1339 * 2: write accumulated values for chaining
1340 * 4: write result available
1341 * 8: convert result to boolean (0/1)
1342 * 16: only read one dword and use that as result
1343 * 32: apply timestamp conversion
1344 * 64: store full 64 bits result
1345 * 128: store signed 32 bits result
1346 * 1.x = fence_offset
1347 * 1.y = pair_stride
1348 * 1.z = pair_count
1349 *
1350 * BUFFER[0] = query result buffer
1351 * BUFFER[1] = previous summary buffer
1352 * BUFFER[2] = next summary buffer or user-supplied buffer
1353 */
1354 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1355 {
1356 /* TEMP[0].xy = accumulated result so far
1357 * TEMP[0].z = result not available
1358 *
1359 * TEMP[1].x = current result index
1360 * TEMP[1].y = current pair index
1361 */
1362 static const char text_tmpl[] =
1363 "COMP\n"
1364 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1365 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1366 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1367 "DCL BUFFER[0]\n"
1368 "DCL BUFFER[1]\n"
1369 "DCL BUFFER[2]\n"
1370 "DCL CONST[0..1]\n"
1371 "DCL TEMP[0..5]\n"
1372 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1373 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1374 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1375 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1376
1377 "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
1378 "UIF TEMP[5]\n"
1379 /* Check result availability. */
1380 "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
1381 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1382 "MOV TEMP[1], TEMP[0].zzzz\n"
1383 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1384
1385 /* Load result if available. */
1386 "UIF TEMP[1]\n"
1387 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1388 "ENDIF\n"
1389 "ELSE\n"
1390 /* Load previously accumulated result if requested. */
1391 "MOV TEMP[0], IMM[0].xxxx\n"
1392 "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
1393 "UIF TEMP[4]\n"
1394 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1395 "ENDIF\n"
1396
1397 "MOV TEMP[1].x, IMM[0].xxxx\n"
1398 "BGNLOOP\n"
1399 /* Break if accumulated result so far is not available. */
1400 "UIF TEMP[0].zzzz\n"
1401 "BRK\n"
1402 "ENDIF\n"
1403
1404 /* Break if result_index >= result_count. */
1405 "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
1406 "UIF TEMP[5]\n"
1407 "BRK\n"
1408 "ENDIF\n"
1409
1410 /* Load fence and check result availability */
1411 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
1412 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1413 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1414 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1415 "UIF TEMP[0].zzzz\n"
1416 "BRK\n"
1417 "ENDIF\n"
1418
1419 "MOV TEMP[1].y, IMM[0].xxxx\n"
1420 "BGNLOOP\n"
1421 /* Load start and end. */
1422 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
1423 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
1424 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1425
1426 "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
1427 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"
1428
1429 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1430 "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"
1431
1432 /* Increment pair index */
1433 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1434 "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
1435 "UIF TEMP[5]\n"
1436 "BRK\n"
1437 "ENDIF\n"
1438 "ENDLOOP\n"
1439
1440 /* Increment result index */
1441 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1442 "ENDLOOP\n"
1443 "ENDIF\n"
1444
1445 "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
1446 "UIF TEMP[4]\n"
1447 /* Store accumulated data for chaining. */
1448 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1449 "ELSE\n"
1450 "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
1451 "UIF TEMP[4]\n"
1452 /* Store result availability. */
1453 "NOT TEMP[0].z, TEMP[0]\n"
1454 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1455 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1456
1457 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1458 "UIF TEMP[4]\n"
1459 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1460 "ENDIF\n"
1461 "ELSE\n"
1462 /* Store result if it is available. */
1463 "NOT TEMP[4], TEMP[0].zzzz\n"
1464 "UIF TEMP[4]\n"
1465 /* Apply timestamp conversion */
1466 "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
1467 "UIF TEMP[4]\n"
1468 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1469 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1470 "ENDIF\n"
1471
1472 /* Convert to boolean */
1473 "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
1474 "UIF TEMP[4]\n"
1475 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
1476 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1477 "MOV TEMP[0].y, IMM[0].xxxx\n"
1478 "ENDIF\n"
1479
1480 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1481 "UIF TEMP[4]\n"
1482 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1483 "ELSE\n"
1484 /* Clamping */
1485 "UIF TEMP[0].yyyy\n"
1486 "MOV TEMP[0].x, IMM[0].wwww\n"
1487 "ENDIF\n"
1488
1489 "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
1490 "UIF TEMP[4]\n"
1491 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1492 "ENDIF\n"
1493
1494 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1495 "ENDIF\n"
1496 "ENDIF\n"
1497 "ENDIF\n"
1498 "ENDIF\n"
1499
1500 "END\n";
1501
1502 char text[sizeof(text_tmpl) + 32];
1503 struct tgsi_token tokens[1024];
1504 struct pipe_compute_state state = {};
1505
1506 /* Hard code the frequency into the shader so that the backend can
1507 * use the full range of optimizations for divide-by-constant.
1508 */
1509 snprintf(text, sizeof(text), text_tmpl,
1510 rctx->screen->info.clock_crystal_freq);
1511
1512 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1513 assert(false);
1514 return;
1515 }
1516
1517 state.ir_type = PIPE_SHADER_IR_TGSI;
1518 state.prog = tokens;
1519
1520 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1521 }
1522
1523 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1524 struct r600_qbo_state *st)
1525 {
1526 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1527
1528 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1529 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1530
1531 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1532 for (unsigned i = 0; i < 3; ++i)
1533 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1534 }
1535
1536 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1537 struct r600_query *rquery,
1538 bool wait,
1539 enum pipe_query_value_type result_type,
1540 int index,
1541 struct pipe_resource *resource,
1542 unsigned offset)
1543 {
1544 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1545 struct r600_query_buffer *qbuf;
1546 struct r600_query_buffer *qbuf_prev;
1547 struct pipe_resource *tmp_buffer = NULL;
1548 unsigned tmp_buffer_offset = 0;
1549 struct r600_qbo_state saved_state = {};
1550 struct pipe_grid_info grid = {};
1551 struct pipe_constant_buffer constant_buffer = {};
1552 struct pipe_shader_buffer ssbo[3];
1553 struct r600_hw_query_params params;
1554 struct {
1555 uint32_t end_offset;
1556 uint32_t result_stride;
1557 uint32_t result_count;
1558 uint32_t config;
1559 uint32_t fence_offset;
1560 uint32_t pair_stride;
1561 uint32_t pair_count;
1562 } consts;
1563
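/* 'consts' mirrors the CONST[0..1] layout documented above
 * r600_create_query_result_shader: end_offset = 0.x, result_stride = 0.y,
 * result_count = 0.z, config = 0.w, fence_offset = 1.x,
 * pair_stride = 1.y, pair_count = 1.z.
 */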
1564 if (!rctx->query_result_shader) {
1565 r600_create_query_result_shader(rctx);
1566 if (!rctx->query_result_shader)
1567 return;
1568 }
1569
1570 if (query->buffer.previous) {
1571 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1572 &tmp_buffer_offset, &tmp_buffer);
1573 if (!tmp_buffer)
1574 return;
1575 }
1576
1577 rctx->save_qbo_state(&rctx->b, &saved_state);
1578
1579 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1580 consts.end_offset = params.end_offset - params.start_offset;
1581 consts.fence_offset = params.fence_offset - params.start_offset;
1582 consts.result_stride = query->result_size;
1583 consts.pair_stride = params.pair_stride;
1584 consts.pair_count = params.pair_count;
1585
1586 constant_buffer.buffer_size = sizeof(consts);
1587 constant_buffer.user_buffer = &consts;
1588
1589 ssbo[1].buffer = tmp_buffer;
1590 ssbo[1].buffer_offset = tmp_buffer_offset;
1591 ssbo[1].buffer_size = 16;
1592
1593 ssbo[2] = ssbo[1];
1594
1595 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1596
1597 grid.block[0] = 1;
1598 grid.block[1] = 1;
1599 grid.block[2] = 1;
1600 grid.grid[0] = 1;
1601 grid.grid[1] = 1;
1602 grid.grid[2] = 1;
1603
1604 consts.config = 0;
1605 if (index < 0)
1606 consts.config |= 4;
1607 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1608 query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
1609 consts.config |= 8;
1610 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1611 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1612 consts.config |= 32;
1613
1614 switch (result_type) {
1615 case PIPE_QUERY_TYPE_U64:
1616 case PIPE_QUERY_TYPE_I64:
1617 consts.config |= 64;
1618 break;
1619 case PIPE_QUERY_TYPE_I32:
1620 consts.config |= 128;
1621 break;
1622 case PIPE_QUERY_TYPE_U32:
1623 break;
1624 }
1625
1626 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1627
1628 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1629 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1630 qbuf_prev = qbuf->previous;
1631 consts.result_count = qbuf->results_end / query->result_size;
1632 consts.config &= ~3;
1633 if (qbuf != &query->buffer)
1634 consts.config |= 1;
1635 if (qbuf->previous)
1636 consts.config |= 2;
1637 } else {
1638 /* Only read the last timestamp. */
1639 qbuf_prev = NULL;
1640 consts.result_count = 0;
1641 consts.config |= 16;
1642 params.start_offset += qbuf->results_end - query->result_size;
1643 }
1644
1645 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1646
1647 ssbo[0].buffer = &qbuf->buf->b.b;
1648 ssbo[0].buffer_offset = params.start_offset;
1649 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1650
1651 if (!qbuf->previous) {
1652 ssbo[2].buffer = resource;
1653 ssbo[2].buffer_offset = offset;
1654 ssbo[2].buffer_size = 8;
1655
1656 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1657 }
1658
1659 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1660
1661 if (wait && qbuf == &query->buffer) {
1662 uint64_t va;
1663
1664 /* Wait for result availability. Wait only for readiness
1665 * of the last entry, since the fence writes should be
1666 * serialized in the CP.
1667 */
1668 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1669 va += params.fence_offset;
1670
1671 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1672 }
1673
1674 rctx->b.launch_grid(&rctx->b, &grid);
1675 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1676 }
1677
1678 r600_restore_qbo_state(rctx, &saved_state);
1679 pipe_resource_reference(&tmp_buffer, NULL);
1680 }
1681
1682 static void r600_render_condition(struct pipe_context *ctx,
1683 struct pipe_query *query,
1684 boolean condition,
1685 enum pipe_render_cond_flag mode)
1686 {
1687 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1688 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1689 struct r600_query_buffer *qbuf;
1690 struct r600_atom *atom = &rctx->render_cond_atom;
1691
1692 rctx->render_cond = query;
1693 rctx->render_cond_invert = condition;
1694 rctx->render_cond_mode = mode;
1695
1696 /* Compute the size of SET_PREDICATION packets. */
1697 atom->num_dw = 0;
1698 if (query) {
1699 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1700 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1701 }
1702
1703 rctx->set_atom_dirty(rctx, atom, query != NULL);
1704 }
1705
1706 void r600_suspend_queries(struct r600_common_context *ctx)
1707 {
1708 struct r600_query_hw *query;
1709
1710 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1711 r600_query_hw_emit_stop(ctx, query);
1712 }
1713 assert(ctx->num_cs_dw_queries_suspend == 0);
1714 }
1715
1716 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1717 struct list_head *query_list)
1718 {
1719 struct r600_query_hw *query;
1720 unsigned num_dw = 0;
1721
1722 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1723 /* begin + end */
1724 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1725
1726 /* Workaround: num_cs_dw_nontimer_queries_suspend is incremented
1727 * for every resumed query, which raises the bar in need_cs_space
1728 * for the queries that are about to be resumed, so budget the
1729 * end-of-query dwords twice.
1730 */
1731 num_dw += query->num_cs_dw_end;
1732 }
1733 /* primitives-generated query: resuming it may dirty the streamout enable atom */
1734 num_dw += ctx->streamout.enable_atom.num_dw;
1735 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1736 num_dw += 13;
1737
1738 return num_dw;
1739 }
1740
1741 void r600_resume_queries(struct r600_common_context *ctx)
1742 {
1743 struct r600_query_hw *query;
1744 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1745
1746 assert(ctx->num_cs_dw_queries_suspend == 0);
1747
1748 /* Check CS space here. Resuming must not be interrupted by flushes. */
1749 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1750
1751 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1752 r600_query_hw_emit_start(ctx, query);
1753 }
1754 }
1755
1756 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1757 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1758 {
1759 struct r600_common_context *ctx =
1760 (struct r600_common_context*)rscreen->aux_context;
1761 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1762 struct r600_resource *buffer;
1763 uint32_t *results;
1764 unsigned i, mask = 0;
1765 unsigned max_rbs = ctx->screen->info.num_render_backends;
1766
1767 assert(rscreen->chip_class <= CAYMAN);
1768
1769 /* Use the backend_map info if the kernel exposes it. */
1770 if (rscreen->info.r600_gb_backend_map_valid) {
1771 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1772 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1773 unsigned item_width, item_mask;
1774
1775 if (ctx->chip_class >= EVERGREEN) {
1776 item_width = 4;
1777 item_mask = 0x7;
1778 } else {
1779 item_width = 2;
1780 item_mask = 0x3;
1781 }
1782
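/* One RB index per tile pipe, packed LSB-first. Hypothetical example:
 * with item_width = 4, num_tile_pipes = 4 and backend_map = 0x3210,
 * the loop extracts RB indices 0, 1, 2, 3, giving mask = 0xf. */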
1783 while (num_tile_pipes--) {
1784 i = backend_map & item_mask;
1785 mask |= (1<<i);
1786 backend_map >>= item_width;
1787 }
1788 if (mask != 0) {
1789 rscreen->info.enabled_rb_mask = mask;
1790 return;
1791 }
1792 }
1793
1794 /* Otherwise, fall back to probing the RBs with a ZPASS_DONE event on older kernels. */
1795
1796 /* create buffer for event data */
1797 buffer = (struct r600_resource*)
1798 pipe_buffer_create(ctx->b.screen, 0,
1799 PIPE_USAGE_STAGING, max_rbs * 16);
1800 if (!buffer)
1801 return;
1802
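/* The buffer holds 16 bytes per RB; ZPASS_DONE writes a 64-bit counter
 * for every RB that exists, with its top bit set, so a nonzero high
 * dword marks an enabled backend (checked below). */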
1803 /* initialize buffer with zeroes */
1804 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1805 if (results) {
1806 memset(results, 0, max_rbs * 4 * 4);
1807
1808 /* emit EVENT_WRITE for ZPASS_DONE */
1809 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1810 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1811 radeon_emit(cs, buffer->gpu_address);
1812 radeon_emit(cs, buffer->gpu_address >> 32);
1813
1814 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1815 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1816
1817 /* analyze results */
1818 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1819 if (results) {
1820 for (i = 0; i < max_rbs; i++) {
1821 /* At least the highest bit will be set if the backend is used. */
1822 if (results[i*4 + 1])
1823 mask |= (1<<i);
1824 }
1825 }
1826 }
1827
1828 r600_resource_reference(&buffer, NULL);
1829
1830 if (mask)
1831 rscreen->info.enabled_rb_mask = mask;
1832 }
1833
1834 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1835 { \
1836 .name = name_, \
1837 .query_type = R600_QUERY_##query_type_, \
1838 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1839 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1840 .group_id = group_id_ \
1841 }
1842
1843 #define X(name_, query_type_, type_, result_type_) \
1844 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1845
1846 #define XG(group_, name_, query_type_, type_, result_type_) \
1847 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1848
1849 static struct pipe_driver_query_info r600_driver_query_list[] = {
1850 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1851 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1852 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
1853 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1854 X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
1855 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1856 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1857 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1858 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1859 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
1860 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
1861 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
1862 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1863 X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
1864 X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
1865 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
1866 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
1867 X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
1868 X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
1869 X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
1870 X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
1871 X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
1872 X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
1873 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1874 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1875 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
1876 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
1877 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1878 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
1879 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
1880 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
1881 X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
1882 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1883 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
1884 X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
1885 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1886 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
1887 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1888 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1889
1890 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1891 * which use them as a fallback path to detect the GPU type.
1892 *
1893 * Note: The names of these queries are significant for GPUPerfStudio
1894 * (and possibly their order as well). */
1895 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1896 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1897 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1898 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1899 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1900
1901 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1902 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1903 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1904
1905 /* The following queries must be at the end of the list because their
1906 * availability is adjusted dynamically based on the DRM version. */
1907 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1908 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
1909 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
1910 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
1911 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
1912 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
1913 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
1914 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
1915 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
1916 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
1917 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
1918 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
1919 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
1920 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
1921 X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
1922 X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
1923 X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
1924 X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
1925 X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
1926 X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
1927 X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
1928 X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
1929 };
1930
1931 #undef X
1932 #undef XG
1933 #undef XFULL
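/* All queries in r600_driver_query_list are exposed on the Gallium HUD
 * by name, e.g. (hypothetical invocation):
 *   GALLIUM_HUD=GFX-BO-list-size,num-GFX-IBs glxgears
 */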
1934
1935 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1936 {
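/* The offsets track r600_driver_query_list: the last 25 entries
 * (temperature, the clocks and the GPU-*-busy counters) need radeon
 * DRM 2.42+, and the last 7 (GPU-pfp-busy .. GPU-ce-busy) are only
 * reported by amdgpu on VI and newer. */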
1937 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1938 return ARRAY_SIZE(r600_driver_query_list);
1939 else if (rscreen->info.drm_major == 3) {
1940 if (rscreen->chip_class >= VI)
1941 return ARRAY_SIZE(r600_driver_query_list);
1942 else
1943 return ARRAY_SIZE(r600_driver_query_list) - 7;
1944 }
1945 else
1946 return ARRAY_SIZE(r600_driver_query_list) - 25;
1947 }
1948
1949 static int r600_get_driver_query_info(struct pipe_screen *screen,
1950 unsigned index,
1951 struct pipe_driver_query_info *info)
1952 {
1953 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1954 unsigned num_queries = r600_get_num_queries(rscreen);
1955
1956 if (!info) {
1957 unsigned num_perfcounters =
1958 r600_get_perfcounter_info(rscreen, 0, NULL);
1959
1960 return num_queries + num_perfcounters;
1961 }
1962
1963 if (index >= num_queries)
1964 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1965
1966 *info = r600_driver_query_list[index];
1967
1968 switch (info->query_type) {
1969 case R600_QUERY_REQUESTED_VRAM:
1970 case R600_QUERY_VRAM_USAGE:
1971 case R600_QUERY_MAPPED_VRAM:
1972 info->max_value.u64 = rscreen->info.vram_size;
1973 break;
1974 case R600_QUERY_REQUESTED_GTT:
1975 case R600_QUERY_GTT_USAGE:
1976 case R600_QUERY_MAPPED_GTT:
1977 info->max_value.u64 = rscreen->info.gart_size;
1978 break;
1979 case R600_QUERY_GPU_TEMPERATURE:
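/* reported in degrees Celsius */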
1980 info->max_value.u64 = 125;
1981 break;
1982 case R600_QUERY_VRAM_VIS_USAGE:
1983 info->max_value.u64 = rscreen->info.vram_vis_size;
1984 break;
1985 }
1986
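/* Software query groups are enumerated after the perfcounter groups
 * (see r600_get_driver_query_group_info), so offset their group ids. */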
1987 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1988 info->group_id += rscreen->perfcounters->num_groups;
1989
1990 return 1;
1991 }
1992
1993 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1994 * performance counter groups, so be careful when changing this and related
1995 * functions.
1996 */
1997 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1998 unsigned index,
1999 struct pipe_driver_query_group_info *info)
2000 {
2001 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
2002 unsigned num_pc_groups = 0;
2003
2004 if (rscreen->perfcounters)
2005 num_pc_groups = rscreen->perfcounters->num_groups;
2006
2007 if (!info)
2008 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
2009
2010 if (index < num_pc_groups)
2011 return r600_get_perfcounter_group_info(rscreen, index, info);
2012
2013 index -= num_pc_groups;
2014 if (index >= R600_NUM_SW_QUERY_GROUPS)
2015 return 0;
2016
2017 info->name = "GPIN";
2018 info->max_active_queries = 5;
2019 info->num_queries = 5;
2020 return 1;
2021 }
2022
2023 void r600_query_init(struct r600_common_context *rctx)
2024 {
2025 rctx->b.create_query = r600_create_query;
2026 rctx->b.create_batch_query = r600_create_batch_query;
2027 rctx->b.destroy_query = r600_destroy_query;
2028 rctx->b.begin_query = r600_begin_query;
2029 rctx->b.end_query = r600_end_query;
2030 rctx->b.get_query_result = r600_get_query_result;
2031 rctx->b.get_query_result_resource = r600_get_query_result_resource;
2032 rctx->render_cond_atom.emit = r600_emit_query_predication;
2033
2034 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
2035 rctx->b.render_condition = r600_render_condition;
2036
2037 LIST_INITHEAD(&rctx->active_queries);
2038 }
2039
2040 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
2041 {
2042 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
2043 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
2044 }