gallium/radeon: fix render predication by SO overflow predicate
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"

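/* Byte offsets describing where one hardware query result slot keeps its
 * begin/end values and its fence dword; pair_stride/pair_count describe
 * per-render-backend result pairs. Filled in by r600_get_hw_query_params().
 */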
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
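
/* Illustrative sketch (not compiled): how one of the software queries above
 * is driven through the pipe_context interface. The begin/end hooks snapshot
 * the counters and get_result returns the difference. The helper name is
 * hypothetical.
 */
#if 0
static uint64_t count_draw_calls_example(struct pipe_context *ctx)
{
	union pipe_query_result result = {0};
	struct pipe_query *q = ctx->create_query(ctx, R600_QUERY_DRAW_CALLS, 0);

	ctx->begin_query(ctx, q);
	/* ... issue some draws ... */
	ctx->end_query(ctx, q);
	ctx->get_query_result(ctx, q, TRUE /* wait */, &result);
	ctx->destroy_query(ctx, q);
	return result.u64; /* end_result - begin_result */
}
#endif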

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

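/* Note on the occlusion buffer layout used below and when reading results:
 * each render backend writes a {begin, end} pair of 64-bit counters at a
 * 16-byte stride, and bit 63 of each value is set once it has been written.
 * Pre-setting that bit for disabled backends makes their always-zero
 * results count as available in r600_query_read_result().
 */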
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		if (ctx->chip_class >= SI) {
			/* Write the timestamp from the CP not waiting for
			 * outstanding draws (top-of-pipe).
			 */
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL |
					COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);
		} else {
			/* Write the timestamp after the last draw is done.
			 * (bottom-of-pipe)
			 */
			r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
						 0, 3, NULL, va, 0, 0);
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

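/* Emit the begin-of-query event: update the occlusion/prims-generated
 * state, reserve CS space for both the begin and end packets, roll over
 * to a fresh query buffer when the current one is full, and write the
 * begin event at the next free result slot.
 */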
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries that have a begin already reserved CS space for the end
	 * in emit_start; only queries without a begin reserve space here. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

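/* Emit SET_PREDICATION packets covering every result slot of the render
 * condition query. Note that the hardware PRIMCOUNT predicate passes when
 * no streamout overflow occurred, the opposite sense of
 * PIPE_QUERY_SO_OVERFLOW_PREDICATE, hence the extra inversion below.
 */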
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		invert = !invert;
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (ctx->chip_class >= GFX9) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
				radeon_emit(cs, op);
				radeon_emit(cs, va);
				radeon_emit(cs, va >> 32);
			} else {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, op | ((va >> 32) & 0xFF));
			}
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

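/* Translate a query type (and, for multi-value queries like SO_STATISTICS
 * or PIPELINE_STATISTICS, a result index) into byte offsets within one
 * result slot: where the begin and end values sit and where the result
 * shader polls the fence dword.
 */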
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

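/* Read one 64-bit begin/end pair from the mapped query buffer (indices are
 * in dwords) and return end - start. If test_status_bit is set, bit 63 of
 * both values is a "result written" flag and 0 is returned while either
 * half is still pending. E.g. PIPE_QUERY_PRIMITIVES_EMITTED passes 2/6 to
 * select the NumPrimitivesWritten members of the begin and end snapshots.
 */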
static uint64_t r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {0, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

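/* Collect a query result on the GPU: dispatch the result shader once per
 * buffer in the query's chain, using a 16-byte scratch allocation
 * (BUFFER[1]/BUFFER[2]) to pass the accumulated value from one dispatch to
 * the next; the dispatch for the oldest buffer in the chain writes to the
 * user-supplied resource instead.
 */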
1542 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1543 struct r600_query *rquery,
1544 bool wait,
1545 enum pipe_query_value_type result_type,
1546 int index,
1547 struct pipe_resource *resource,
1548 unsigned offset)
1549 {
1550 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1551 struct r600_query_buffer *qbuf;
1552 struct r600_query_buffer *qbuf_prev;
1553 struct pipe_resource *tmp_buffer = NULL;
1554 unsigned tmp_buffer_offset = 0;
1555 struct r600_qbo_state saved_state = {};
1556 struct pipe_grid_info grid = {};
1557 struct pipe_constant_buffer constant_buffer = {};
1558 struct pipe_shader_buffer ssbo[3];
1559 struct r600_hw_query_params params;
1560 struct {
1561 uint32_t end_offset;
1562 uint32_t result_stride;
1563 uint32_t result_count;
1564 uint32_t config;
1565 uint32_t fence_offset;
1566 uint32_t pair_stride;
1567 uint32_t pair_count;
1568 } consts;
1569
1570 if (!rctx->query_result_shader) {
1571 r600_create_query_result_shader(rctx);
1572 if (!rctx->query_result_shader)
1573 return;
1574 }
1575
1576 if (query->buffer.previous) {
1577 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1578 &tmp_buffer_offset, &tmp_buffer);
1579 if (!tmp_buffer)
1580 return;
1581 }
1582
1583 rctx->save_qbo_state(&rctx->b, &saved_state);
1584
1585 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1586 consts.end_offset = params.end_offset - params.start_offset;
1587 consts.fence_offset = params.fence_offset - params.start_offset;
1588 consts.result_stride = query->result_size;
1589 consts.pair_stride = params.pair_stride;
1590 consts.pair_count = params.pair_count;
1591
1592 constant_buffer.buffer_size = sizeof(consts);
1593 constant_buffer.user_buffer = &consts;
1594
1595 ssbo[1].buffer = tmp_buffer;
1596 ssbo[1].buffer_offset = tmp_buffer_offset;
1597 ssbo[1].buffer_size = 16;
1598
1599 ssbo[2] = ssbo[1];
1600
1601 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1602
1603 grid.block[0] = 1;
1604 grid.block[1] = 1;
1605 grid.block[2] = 1;
1606 grid.grid[0] = 1;
1607 grid.grid[1] = 1;
1608 grid.grid[2] = 1;
1609
1610 consts.config = 0;
1611 if (index < 0)
1612 consts.config |= 4;
1613 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1614 query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
1615 consts.config |= 8;
1616 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1617 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1618 consts.config |= 32;
1619
1620 switch (result_type) {
1621 case PIPE_QUERY_TYPE_U64:
1622 case PIPE_QUERY_TYPE_I64:
1623 consts.config |= 64;
1624 break;
1625 case PIPE_QUERY_TYPE_I32:
1626 consts.config |= 128;
1627 break;
1628 case PIPE_QUERY_TYPE_U32:
1629 break;
1630 }
1631
1632 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1633
1634 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1635 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1636 qbuf_prev = qbuf->previous;
1637 consts.result_count = qbuf->results_end / query->result_size;
1638 consts.config &= ~3;
1639 if (qbuf != &query->buffer)
1640 consts.config |= 1;
1641 if (qbuf->previous)
1642 consts.config |= 2;
1643 } else {
1644 /* Only read the last timestamp. */
1645 qbuf_prev = NULL;
1646 consts.result_count = 0;
1647 consts.config |= 16;
1648 params.start_offset += qbuf->results_end - query->result_size;
1649 }
1650
1651 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1652
1653 ssbo[0].buffer = &qbuf->buf->b.b;
1654 ssbo[0].buffer_offset = params.start_offset;
1655 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1656
1657 if (!qbuf->previous) {
1658 ssbo[2].buffer = resource;
1659 ssbo[2].buffer_offset = offset;
1660 ssbo[2].buffer_size = 8;
1661
1662 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1663 }
1664
1665 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1666
1667 if (wait && qbuf == &query->buffer) {
1668 uint64_t va;
1669
1670 /* Wait for result availability. Wait only for readiness
1671 * of the last entry, since the fence writes should be
1672 * serialized in the CP.
1673 */
1674 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1675 va += params.fence_offset;
1676
1677 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1678 }
1679
1680 rctx->b.launch_grid(&rctx->b, &grid);
1681 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1682 }
1683
1684 r600_restore_qbo_state(rctx, &saved_state);
1685 pipe_resource_reference(&tmp_buffer, NULL);
1686 }
1687
1688 static void r600_render_condition(struct pipe_context *ctx,
1689 struct pipe_query *query,
1690 boolean condition,
1691 enum pipe_render_cond_flag mode)
1692 {
1693 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1694 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1695 struct r600_query_buffer *qbuf;
1696 struct r600_atom *atom = &rctx->render_cond_atom;
1697
1698 rctx->render_cond = query;
1699 rctx->render_cond_invert = condition;
1700 rctx->render_cond_mode = mode;
1701
1702 /* Compute the size of SET_PREDICATION packets. */
1703 atom->num_dw = 0;
1704 if (query) {
1705 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1706 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1707 }
1708
1709 rctx->set_atom_dirty(rctx, atom, query != NULL);
1710 }
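/*
 * A minimal sketch of how a state tracker typically drives this hook
 * (hypothetical helper; assumes a query that has already been ended).
 * `condition` is stored in render_cond_invert above, so false means
 * "draw when the query counted something".
 */
static void example_conditional_draw(struct pipe_context *pipe,
				     struct pipe_query *query,
				     const struct pipe_draw_info *info)
{
	pipe->render_condition(pipe, query, false, PIPE_RENDER_COND_WAIT);
	pipe->draw_vbo(pipe, info);

	/* A NULL query disables predication again. */
	pipe->render_condition(pipe, NULL, false, 0);
}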
1711
1712 void r600_suspend_queries(struct r600_common_context *ctx)
1713 {
1714 struct r600_query_hw *query;
1715
1716 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1717 r600_query_hw_emit_stop(ctx, query);
1718 }
1719 assert(ctx->num_cs_dw_queries_suspend == 0);
1720 }
1721
1722 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1723 struct list_head *query_list)
1724 {
1725 struct r600_query_hw *query;
1726 unsigned num_dw = 0;
1727
1728 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1729 /* begin + end */
1730 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1731
1732 /* Workaround for the fact that
1733 		 * num_cs_dw_queries_suspend is incremented for every
1734 * resumed query, which raises the bar in need_cs_space for
1735 * queries about to be resumed.
1736 */
1737 num_dw += query->num_cs_dw_end;
1738 }
1739 	/* a primitives-generated query may need to toggle streamout enable */
1740 num_dw += ctx->streamout.enable_atom.num_dw;
1741 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1742 num_dw += 13;
1743
1744 return num_dw;
1745 }
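/*
 * Worked example with hypothetical packet sizes: a single active query with
 * num_cs_dw_begin = 8 and num_cs_dw_end = 8 contributes 8 + 8 + 8 = 24
 * dwords (begin + end + the workaround above), so two such queries yield
 * 48 dwords, plus the streamout enable atom and the fixed guess of 13.
 */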
1746
1747 void r600_resume_queries(struct r600_common_context *ctx)
1748 {
1749 struct r600_query_hw *query;
1750 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1751
1752 assert(ctx->num_cs_dw_queries_suspend == 0);
1753
1754 /* Check CS space here. Resuming must not be interrupted by flushes. */
1755 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1756
1757 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1758 r600_query_hw_emit_start(ctx, query);
1759 }
1760 }
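/*
 * A minimal sketch of the intended pairing (illustrative function name;
 * the actual flush logic lives elsewhere in the driver):
 */
static void example_flush_with_active_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx);	/* emits the "end" half of each active query */
	/* ... flush the gfx command stream here ... */
	r600_resume_queries(ctx);	/* checks CS space, emits "begin" again */
}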
1761
1762 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1763 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1764 {
1765 struct r600_common_context *ctx =
1766 (struct r600_common_context*)rscreen->aux_context;
1767 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1768 struct r600_resource *buffer;
1769 uint32_t *results;
1770 unsigned i, mask = 0;
1771 unsigned max_rbs = ctx->screen->info.num_render_backends;
1772
1773 assert(rscreen->chip_class <= CAYMAN);
1774
1775 	/* Use the backend_map reported by the kernel, if it is valid. */
1776 if (rscreen->info.r600_gb_backend_map_valid) {
1777 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1778 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1779 unsigned item_width, item_mask;
1780
1781 if (ctx->chip_class >= EVERGREEN) {
1782 item_width = 4;
1783 item_mask = 0x7;
1784 } else {
1785 item_width = 2;
1786 item_mask = 0x3;
1787 }
1788
1789 while (num_tile_pipes--) {
1790 i = backend_map & item_mask;
1791 mask |= (1<<i);
1792 backend_map >>= item_width;
1793 }
1794 if (mask != 0) {
1795 rscreen->info.enabled_rb_mask = mask;
1796 return;
1797 }
1798 }
1799
1800 	/* Otherwise, fall back to probing the render backends (older kernels). */
1801
1802 /* create buffer for event data */
1803 buffer = (struct r600_resource*)
1804 pipe_buffer_create(ctx->b.screen, 0,
1805 PIPE_USAGE_STAGING, max_rbs * 16);
1806 if (!buffer)
1807 return;
1808
1809 /* initialize buffer with zeroes */
1810 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1811 if (results) {
1812 		memset(results, 0, max_rbs * 16);
1813
1814 /* emit EVENT_WRITE for ZPASS_DONE */
1815 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1816 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1817 radeon_emit(cs, buffer->gpu_address);
1818 radeon_emit(cs, buffer->gpu_address >> 32);
1819
1820 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1821 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1822
1823 /* analyze results */
1824 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1825 if (results) {
1826 			for (i = 0; i < max_rbs; i++) {
1827 				/* At least the highest bit will be set if the backend is used. */
1828 				if (results[i*4 + 1])
1829 					mask |= (1<<i);
1830 }
1831 }
1832 }
1833
1834 r600_resource_reference(&buffer, NULL);
1835
1836 if (mask)
1837 rscreen->info.enabled_rb_mask = mask;
1838 }
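/*
 * Worked example of the backend_map decoding above, with hypothetical
 * register values: on EVERGREEN (item_width = 4, item_mask = 0x7),
 * backend_map = 0x10 with num_tile_pipes = 2 selects i = 0 and then
 * i = 1, so enabled_rb_mask becomes 0x3.
 */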
1839
1840 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1841 { \
1842 .name = name_, \
1843 .query_type = R600_QUERY_##query_type_, \
1844 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1845 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1846 .group_id = group_id_ \
1847 }
1848
1849 #define X(name_, query_type_, type_, result_type_) \
1850 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1851
1852 #define XG(group_, name_, query_type_, type_, result_type_) \
1853 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
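/*
 * For reference, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to
 * the designated initializer:
 *
 * {
 *	.name = "draw-calls",
 *	.query_type = R600_QUERY_DRAW_CALLS,
 *	.type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *	.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *	.group_id = ~(unsigned)0
 * }
 */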
1854
1855 static struct pipe_driver_query_info r600_driver_query_list[] = {
1856 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1857 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1858 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
1859 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1860 X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
1861 X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
1862 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1863 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1864 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1865 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1866 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
1867 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
1868 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
1869 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1870 X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
1871 X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
1872 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
1873 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
1874 X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
1875 X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
1876 X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
1877 X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
1878 X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
1879 X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
1880 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1881 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1882 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
1883 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
1884 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1885 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
1886 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
1887 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
1888 X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
1889 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1890 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
1891 X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
1892 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1893 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
1894 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1895 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1896
1897 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1898 	 * which use them as a fallback path to detect the GPU type.
1899 *
1900 * Note: The names of these queries are significant for GPUPerfStudio
1901 * (and possibly their order as well). */
1902 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1903 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1904 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1905 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1906 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1907
1908 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1909 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1910 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1911
1912 /* The following queries must be at the end of the list because their
1913 	 * availability is adjusted dynamically based on the DRM version; see r600_get_num_queries(). */
1914 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1915 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
1916 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
1917 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
1918 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
1919 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
1920 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
1921 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
1922 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
1923 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
1924 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
1925 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
1926 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
1927 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
1928 X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
1929 X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
1930 X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
1931 X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
1932 X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
1933 X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
1934 X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
1935 X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
1936 };
1937
1938 #undef X
1939 #undef XG
1940 #undef XFULL
1941
1942 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1943 {
1944 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1945 return ARRAY_SIZE(r600_driver_query_list);
1946 else if (rscreen->info.drm_major == 3) {
1947 if (rscreen->chip_class >= VI)
1948 return ARRAY_SIZE(r600_driver_query_list);
1949 else
1950 			return ARRAY_SIZE(r600_driver_query_list) - 7; /* without the last 7 GPU-busy counters */
1951 }
1952 else
1953 		return ARRAY_SIZE(r600_driver_query_list) - 25; /* without clocks, temperature and GPU-busy counters */
1954 }
1955
1956 static int r600_get_driver_query_info(struct pipe_screen *screen,
1957 unsigned index,
1958 struct pipe_driver_query_info *info)
1959 {
1960 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1961 unsigned num_queries = r600_get_num_queries(rscreen);
1962
1963 if (!info) {
1964 unsigned num_perfcounters =
1965 r600_get_perfcounter_info(rscreen, 0, NULL);
1966
1967 return num_queries + num_perfcounters;
1968 }
1969
1970 if (index >= num_queries)
1971 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1972
1973 *info = r600_driver_query_list[index];
1974
1975 switch (info->query_type) {
1976 case R600_QUERY_REQUESTED_VRAM:
1977 case R600_QUERY_VRAM_USAGE:
1978 case R600_QUERY_MAPPED_VRAM:
1979 info->max_value.u64 = rscreen->info.vram_size;
1980 break;
1981 case R600_QUERY_REQUESTED_GTT:
1982 case R600_QUERY_GTT_USAGE:
1983 case R600_QUERY_MAPPED_GTT:
1984 info->max_value.u64 = rscreen->info.gart_size;
1985 break;
1986 case R600_QUERY_GPU_TEMPERATURE:
1987 		info->max_value.u64 = 125; /* degrees Celsius */
1988 break;
1989 case R600_QUERY_VRAM_VIS_USAGE:
1990 info->max_value.u64 = rscreen->info.vram_vis_size;
1991 break;
1992 }
1993
1994 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1995 info->group_id += rscreen->perfcounters->num_groups;
1996
1997 return 1;
1998 }
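/*
 * A minimal sketch of the enumeration convention implemented above: a NULL
 * info pointer asks for the total count, then each index is queried in turn
 * (hypothetical helper; assumes <stdio.h> is available):
 */
static void example_list_driver_queries(struct pipe_screen *screen)
{
	struct pipe_driver_query_info info;
	int i, num = screen->get_driver_query_info(screen, 0, NULL);

	for (i = 0; i < num; i++) {
		if (screen->get_driver_query_info(screen, i, &info))
			printf("%s\n", info.name);
	}
}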
1999
2000 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
2001 * performance counter groups, so be careful when changing this and related
2002 * functions.
2003 */
2004 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
2005 unsigned index,
2006 struct pipe_driver_query_group_info *info)
2007 {
2008 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
2009 unsigned num_pc_groups = 0;
2010
2011 if (rscreen->perfcounters)
2012 num_pc_groups = rscreen->perfcounters->num_groups;
2013
2014 if (!info)
2015 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
2016
2017 if (index < num_pc_groups)
2018 return r600_get_perfcounter_group_info(rscreen, index, info);
2019
2020 index -= num_pc_groups;
2021 if (index >= R600_NUM_SW_QUERY_GROUPS)
2022 return 0;
2023
2024 info->name = "GPIN";
2025 info->max_active_queries = 5;
2026 info->num_queries = 5;
2027 return 1;
2028 }
2029
2030 void r600_query_init(struct r600_common_context *rctx)
2031 {
2032 rctx->b.create_query = r600_create_query;
2033 rctx->b.create_batch_query = r600_create_batch_query;
2034 rctx->b.destroy_query = r600_destroy_query;
2035 rctx->b.begin_query = r600_begin_query;
2036 rctx->b.end_query = r600_end_query;
2037 rctx->b.get_query_result = r600_get_query_result;
2038 rctx->b.get_query_result_resource = r600_get_query_result_resource;
2039 rctx->render_cond_atom.emit = r600_emit_query_predication;
2040
2041 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
2042 rctx->b.render_condition = r600_render_condition;
2043
2044 LIST_INITHEAD(&rctx->active_queries);
2045 }
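/*
 * A minimal sketch of the query lifecycle these hooks implement, from the
 * API user's side (hypothetical helper; error handling omitted):
 */
static uint64_t example_count_passing_samples(struct pipe_context *pipe)
{
	union pipe_query_result result;
	struct pipe_query *q =
		pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);

	pipe->begin_query(pipe, q);
	/* ... draw something here ... */
	pipe->end_query(pipe, q);

	/* true = block until the GPU has written the result. */
	pipe->get_query_result(pipe, q, true, &result);
	pipe->destroy_query(pipe, q);
	return result.u64;
}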
2046
2047 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
2048 {
2049 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
2050 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
2051 }