util: move os_time.[ch] to src/util
[mesa.git] src/gallium/drivers/r600/r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_pipe.h"
27 #include "r600_cs.h"
28 #include "util/u_memory.h"
29 #include "util/u_upload_mgr.h"
30 #include "util/os_time.h"
31 #include "tgsi/tgsi_text.h"
32
33 #define R600_MAX_STREAMS 4
34
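/* Byte layout of one hardware query result slot, as consumed by the
 * result-resource compute path below: offsets of the begin/end counters
 * and of the availability fence, plus the stride and count of
 * (begin, end) pairs for per-RB or per-stream results. */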
35 struct r600_hw_query_params {
36 unsigned start_offset;
37 unsigned end_offset;
38 unsigned fence_offset;
39 unsigned pair_stride;
40 unsigned pair_count;
41 };
42
43 /* Queries without buffer handling or suspend/resume. */
44 struct r600_query_sw {
45 struct r600_query b;
46
47 uint64_t begin_result;
48 uint64_t end_result;
49
50 uint64_t begin_time;
51 uint64_t end_time;
52
53 /* Fence for GPU_FINISHED. */
54 struct pipe_fence_handle *fence;
55 };
56
57 static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
58 struct r600_query *rquery)
59 {
60 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
61
62 rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
63 FREE(query);
64 }
65
66 static enum radeon_value_id winsys_id_from_type(unsigned type)
67 {
68 switch (type) {
69 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
70 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
71 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
72 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
73 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
74 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
75 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
76 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
77 case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
78 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
79 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
80 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
81 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
82 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
83 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
84 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
85 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
86 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
87 case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
88 default: unreachable("query type does not correspond to winsys id");
89 }
90 }
91
92 static bool r600_query_sw_begin(struct r600_common_context *rctx,
93 struct r600_query *rquery)
94 {
95 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
96 enum radeon_value_id ws_id;
97
98 switch (query->b.type) {
99 case PIPE_QUERY_TIMESTAMP_DISJOINT:
100 case PIPE_QUERY_GPU_FINISHED:
101 break;
102 case R600_QUERY_DRAW_CALLS:
103 query->begin_result = rctx->num_draw_calls;
104 break;
105 case R600_QUERY_DECOMPRESS_CALLS:
106 query->begin_result = rctx->num_decompress_calls;
107 break;
108 case R600_QUERY_MRT_DRAW_CALLS:
109 query->begin_result = rctx->num_mrt_draw_calls;
110 break;
111 case R600_QUERY_PRIM_RESTART_CALLS:
112 query->begin_result = rctx->num_prim_restart_calls;
113 break;
114 case R600_QUERY_SPILL_DRAW_CALLS:
115 query->begin_result = rctx->num_spill_draw_calls;
116 break;
117 case R600_QUERY_COMPUTE_CALLS:
118 query->begin_result = rctx->num_compute_calls;
119 break;
120 case R600_QUERY_SPILL_COMPUTE_CALLS:
121 query->begin_result = rctx->num_spill_compute_calls;
122 break;
123 case R600_QUERY_DMA_CALLS:
124 query->begin_result = rctx->num_dma_calls;
125 break;
126 case R600_QUERY_CP_DMA_CALLS:
127 query->begin_result = rctx->num_cp_dma_calls;
128 break;
129 case R600_QUERY_NUM_VS_FLUSHES:
130 query->begin_result = rctx->num_vs_flushes;
131 break;
132 case R600_QUERY_NUM_PS_FLUSHES:
133 query->begin_result = rctx->num_ps_flushes;
134 break;
135 case R600_QUERY_NUM_CS_FLUSHES:
136 query->begin_result = rctx->num_cs_flushes;
137 break;
138 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
139 query->begin_result = rctx->num_cb_cache_flushes;
140 break;
141 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
142 query->begin_result = rctx->num_db_cache_flushes;
143 break;
144 case R600_QUERY_NUM_L2_INVALIDATES:
145 query->begin_result = rctx->num_L2_invalidates;
146 break;
147 case R600_QUERY_NUM_L2_WRITEBACKS:
148 query->begin_result = rctx->num_L2_writebacks;
149 break;
150 case R600_QUERY_NUM_RESIDENT_HANDLES:
151 query->begin_result = rctx->num_resident_handles;
152 break;
153 case R600_QUERY_TC_OFFLOADED_SLOTS:
154 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
155 break;
156 case R600_QUERY_TC_DIRECT_SLOTS:
157 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
158 break;
159 case R600_QUERY_TC_NUM_SYNCS:
160 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
161 break;
162 case R600_QUERY_REQUESTED_VRAM:
163 case R600_QUERY_REQUESTED_GTT:
164 case R600_QUERY_MAPPED_VRAM:
165 case R600_QUERY_MAPPED_GTT:
166 case R600_QUERY_VRAM_USAGE:
167 case R600_QUERY_VRAM_VIS_USAGE:
168 case R600_QUERY_GTT_USAGE:
169 case R600_QUERY_GPU_TEMPERATURE:
170 case R600_QUERY_CURRENT_GPU_SCLK:
171 case R600_QUERY_CURRENT_GPU_MCLK:
172 case R600_QUERY_NUM_MAPPED_BUFFERS:
173 query->begin_result = 0;
174 break;
175 case R600_QUERY_BUFFER_WAIT_TIME:
176 case R600_QUERY_NUM_GFX_IBS:
177 case R600_QUERY_NUM_SDMA_IBS:
178 case R600_QUERY_NUM_BYTES_MOVED:
179 case R600_QUERY_NUM_EVICTIONS:
180 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
181 ws_id = winsys_id_from_type(query->b.type);
182 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
183 break;
184 }
185 case R600_QUERY_GFX_BO_LIST_SIZE:
186 ws_id = winsys_id_from_type(query->b.type);
187 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
188 query->begin_time = rctx->ws->query_value(rctx->ws,
189 RADEON_NUM_GFX_IBS);
190 break;
191 case R600_QUERY_CS_THREAD_BUSY:
192 ws_id = winsys_id_from_type(query->b.type);
193 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
194 query->begin_time = os_time_get_nano();
195 break;
196 case R600_QUERY_GALLIUM_THREAD_BUSY:
197 query->begin_result =
198 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
199 query->begin_time = os_time_get_nano();
200 break;
201 case R600_QUERY_GPU_LOAD:
202 case R600_QUERY_GPU_SHADERS_BUSY:
203 case R600_QUERY_GPU_TA_BUSY:
204 case R600_QUERY_GPU_GDS_BUSY:
205 case R600_QUERY_GPU_VGT_BUSY:
206 case R600_QUERY_GPU_IA_BUSY:
207 case R600_QUERY_GPU_SX_BUSY:
208 case R600_QUERY_GPU_WD_BUSY:
209 case R600_QUERY_GPU_BCI_BUSY:
210 case R600_QUERY_GPU_SC_BUSY:
211 case R600_QUERY_GPU_PA_BUSY:
212 case R600_QUERY_GPU_DB_BUSY:
213 case R600_QUERY_GPU_CP_BUSY:
214 case R600_QUERY_GPU_CB_BUSY:
215 case R600_QUERY_GPU_SDMA_BUSY:
216 case R600_QUERY_GPU_PFP_BUSY:
217 case R600_QUERY_GPU_MEQ_BUSY:
218 case R600_QUERY_GPU_ME_BUSY:
219 case R600_QUERY_GPU_SURF_SYNC_BUSY:
220 case R600_QUERY_GPU_CP_DMA_BUSY:
221 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
222 query->begin_result = r600_begin_counter(rctx->screen,
223 query->b.type);
224 break;
225 case R600_QUERY_NUM_COMPILATIONS:
226 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
227 break;
228 case R600_QUERY_NUM_SHADERS_CREATED:
229 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
230 break;
231 case R600_QUERY_NUM_SHADER_CACHE_HITS:
232 query->begin_result =
233 p_atomic_read(&rctx->screen->num_shader_cache_hits);
234 break;
235 case R600_QUERY_GPIN_ASIC_ID:
236 case R600_QUERY_GPIN_NUM_SIMD:
237 case R600_QUERY_GPIN_NUM_RB:
238 case R600_QUERY_GPIN_NUM_SPI:
239 case R600_QUERY_GPIN_NUM_SE:
240 break;
241 default:
242 unreachable("r600_query_sw_begin: bad query type");
243 }
244
245 return true;
246 }
247
248 static bool r600_query_sw_end(struct r600_common_context *rctx,
249 struct r600_query *rquery)
250 {
251 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
252 enum radeon_value_id ws_id;
253
254 switch (query->b.type) {
255 case PIPE_QUERY_TIMESTAMP_DISJOINT:
256 break;
257 case PIPE_QUERY_GPU_FINISHED:
258 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
259 break;
260 case R600_QUERY_DRAW_CALLS:
261 query->end_result = rctx->num_draw_calls;
262 break;
263 case R600_QUERY_DECOMPRESS_CALLS:
264 query->end_result = rctx->num_decompress_calls;
265 break;
266 case R600_QUERY_MRT_DRAW_CALLS:
267 query->end_result = rctx->num_mrt_draw_calls;
268 break;
269 case R600_QUERY_PRIM_RESTART_CALLS:
270 query->end_result = rctx->num_prim_restart_calls;
271 break;
272 case R600_QUERY_SPILL_DRAW_CALLS:
273 query->end_result = rctx->num_spill_draw_calls;
274 break;
275 case R600_QUERY_COMPUTE_CALLS:
276 query->end_result = rctx->num_compute_calls;
277 break;
278 case R600_QUERY_SPILL_COMPUTE_CALLS:
279 query->end_result = rctx->num_spill_compute_calls;
280 break;
281 case R600_QUERY_DMA_CALLS:
282 query->end_result = rctx->num_dma_calls;
283 break;
284 case R600_QUERY_CP_DMA_CALLS:
285 query->end_result = rctx->num_cp_dma_calls;
286 break;
287 case R600_QUERY_NUM_VS_FLUSHES:
288 query->end_result = rctx->num_vs_flushes;
289 break;
290 case R600_QUERY_NUM_PS_FLUSHES:
291 query->end_result = rctx->num_ps_flushes;
292 break;
293 case R600_QUERY_NUM_CS_FLUSHES:
294 query->end_result = rctx->num_cs_flushes;
295 break;
296 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
297 query->end_result = rctx->num_cb_cache_flushes;
298 break;
299 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
300 query->end_result = rctx->num_db_cache_flushes;
301 break;
302 case R600_QUERY_NUM_L2_INVALIDATES:
303 query->end_result = rctx->num_L2_invalidates;
304 break;
305 case R600_QUERY_NUM_L2_WRITEBACKS:
306 query->end_result = rctx->num_L2_writebacks;
307 break;
308 case R600_QUERY_NUM_RESIDENT_HANDLES:
309 query->end_result = rctx->num_resident_handles;
310 break;
311 case R600_QUERY_TC_OFFLOADED_SLOTS:
312 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
313 break;
314 case R600_QUERY_TC_DIRECT_SLOTS:
315 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
316 break;
317 case R600_QUERY_TC_NUM_SYNCS:
318 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
319 break;
320 case R600_QUERY_REQUESTED_VRAM:
321 case R600_QUERY_REQUESTED_GTT:
322 case R600_QUERY_MAPPED_VRAM:
323 case R600_QUERY_MAPPED_GTT:
324 case R600_QUERY_VRAM_USAGE:
325 case R600_QUERY_VRAM_VIS_USAGE:
326 case R600_QUERY_GTT_USAGE:
327 case R600_QUERY_GPU_TEMPERATURE:
328 case R600_QUERY_CURRENT_GPU_SCLK:
329 case R600_QUERY_CURRENT_GPU_MCLK:
330 case R600_QUERY_BUFFER_WAIT_TIME:
331 case R600_QUERY_NUM_MAPPED_BUFFERS:
332 case R600_QUERY_NUM_GFX_IBS:
333 case R600_QUERY_NUM_SDMA_IBS:
334 case R600_QUERY_NUM_BYTES_MOVED:
335 case R600_QUERY_NUM_EVICTIONS:
336 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
337 ws_id = winsys_id_from_type(query->b.type);
338 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
339 break;
340 }
341 case R600_QUERY_GFX_BO_LIST_SIZE:
342 ws_id = winsys_id_from_type(query->b.type);
343 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
344 query->end_time = rctx->ws->query_value(rctx->ws,
345 RADEON_NUM_GFX_IBS);
346 break;
347 case R600_QUERY_CS_THREAD_BUSY:
348 ws_id = winsys_id_from_type(query->b.type);
349 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
350 query->end_time = os_time_get_nano();
351 break;
352 case R600_QUERY_GALLIUM_THREAD_BUSY:
353 query->end_result =
354 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
355 query->end_time = os_time_get_nano();
356 break;
357 case R600_QUERY_GPU_LOAD:
358 case R600_QUERY_GPU_SHADERS_BUSY:
359 case R600_QUERY_GPU_TA_BUSY:
360 case R600_QUERY_GPU_GDS_BUSY:
361 case R600_QUERY_GPU_VGT_BUSY:
362 case R600_QUERY_GPU_IA_BUSY:
363 case R600_QUERY_GPU_SX_BUSY:
364 case R600_QUERY_GPU_WD_BUSY:
365 case R600_QUERY_GPU_BCI_BUSY:
366 case R600_QUERY_GPU_SC_BUSY:
367 case R600_QUERY_GPU_PA_BUSY:
368 case R600_QUERY_GPU_DB_BUSY:
369 case R600_QUERY_GPU_CP_BUSY:
370 case R600_QUERY_GPU_CB_BUSY:
371 case R600_QUERY_GPU_SDMA_BUSY:
372 case R600_QUERY_GPU_PFP_BUSY:
373 case R600_QUERY_GPU_MEQ_BUSY:
374 case R600_QUERY_GPU_ME_BUSY:
375 case R600_QUERY_GPU_SURF_SYNC_BUSY:
376 case R600_QUERY_GPU_CP_DMA_BUSY:
377 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
378 query->end_result = r600_end_counter(rctx->screen,
379 query->b.type,
380 query->begin_result);
381 query->begin_result = 0;
382 break;
383 case R600_QUERY_NUM_COMPILATIONS:
384 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
385 break;
386 case R600_QUERY_NUM_SHADERS_CREATED:
387 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
388 break;
389 case R600_QUERY_NUM_SHADER_CACHE_HITS:
390 query->end_result =
391 p_atomic_read(&rctx->screen->num_shader_cache_hits);
392 break;
393 case R600_QUERY_GPIN_ASIC_ID:
394 case R600_QUERY_GPIN_NUM_SIMD:
395 case R600_QUERY_GPIN_NUM_RB:
396 case R600_QUERY_GPIN_NUM_SPI:
397 case R600_QUERY_GPIN_NUM_SE:
398 break;
399 default:
400 unreachable("r600_query_sw_end: bad query type");
401 }
402
403 return true;
404 }
405
406 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
407 struct r600_query *rquery,
408 bool wait,
409 union pipe_query_result *result)
410 {
411 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
412
413 switch (query->b.type) {
414 case PIPE_QUERY_TIMESTAMP_DISJOINT:
415 /* Convert from cycles per millisecond to cycles per second (Hz). */
416 result->timestamp_disjoint.frequency =
417 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
418 result->timestamp_disjoint.disjoint = false;
419 return true;
420 case PIPE_QUERY_GPU_FINISHED: {
421 struct pipe_screen *screen = rctx->b.screen;
422 struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
423
424 result->b = screen->fence_finish(screen, ctx, query->fence,
425 wait ? PIPE_TIMEOUT_INFINITE : 0);
426 return result->b;
427 }
428
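	/* Average BO-list size per gfx IB: the counter delta divided by
	 * the delta in gfx IB count saved in begin_time/end_time. */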
429 case R600_QUERY_GFX_BO_LIST_SIZE:
430 result->u64 = (query->end_result - query->begin_result) /
431 (query->end_time - query->begin_time);
432 return true;
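	/* Busy time as a percentage of elapsed wall-clock time. */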
433 case R600_QUERY_CS_THREAD_BUSY:
434 case R600_QUERY_GALLIUM_THREAD_BUSY:
435 result->u64 = (query->end_result - query->begin_result) * 100 /
436 (query->end_time - query->begin_time);
437 return true;
438 case R600_QUERY_GPIN_ASIC_ID:
439 result->u32 = 0;
440 return true;
441 case R600_QUERY_GPIN_NUM_SIMD:
442 result->u32 = rctx->screen->info.num_good_compute_units;
443 return true;
444 case R600_QUERY_GPIN_NUM_RB:
445 result->u32 = rctx->screen->info.num_render_backends;
446 return true;
447 case R600_QUERY_GPIN_NUM_SPI:
448 result->u32 = 1; /* all supported chips have one SPI per SE */
449 return true;
450 case R600_QUERY_GPIN_NUM_SE:
451 result->u32 = rctx->screen->info.max_se;
452 return true;
453 }
454
455 result->u64 = query->end_result - query->begin_result;
456
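	/* Normalize units: buffer-wait time ns -> us, temperature
	 * millidegrees -> degrees, clocks MHz -> Hz. */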
457 switch (query->b.type) {
458 case R600_QUERY_BUFFER_WAIT_TIME:
459 case R600_QUERY_GPU_TEMPERATURE:
460 result->u64 /= 1000;
461 break;
462 case R600_QUERY_CURRENT_GPU_SCLK:
463 case R600_QUERY_CURRENT_GPU_MCLK:
464 result->u64 *= 1000000;
465 break;
466 }
467
468 return true;
469 }
470
471
472 static struct r600_query_ops sw_query_ops = {
473 .destroy = r600_query_sw_destroy,
474 .begin = r600_query_sw_begin,
475 .end = r600_query_sw_end,
476 .get_result = r600_query_sw_get_result,
477 .get_result_resource = NULL
478 };
479
480 static struct pipe_query *r600_query_sw_create(unsigned query_type)
481 {
482 struct r600_query_sw *query;
483
484 query = CALLOC_STRUCT(r600_query_sw);
485 if (!query)
486 return NULL;
487
488 query->b.type = query_type;
489 query->b.ops = &sw_query_ops;
490
491 return (struct pipe_query *)query;
492 }
493
494 void r600_query_hw_destroy(struct r600_common_screen *rscreen,
495 struct r600_query *rquery)
496 {
497 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
498 struct r600_query_buffer *prev = query->buffer.previous;
499
500 /* Release all query buffers. */
501 while (prev) {
502 struct r600_query_buffer *qbuf = prev;
503 prev = prev->previous;
504 r600_resource_reference(&qbuf->buf, NULL);
505 FREE(qbuf);
506 }
507
508 r600_resource_reference(&query->buffer.buf, NULL);
509 r600_resource_reference(&query->workaround_buf, NULL);
510 FREE(rquery);
511 }
512
513 static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
514 struct r600_query_hw *query)
515 {
516 unsigned buf_size = MAX2(query->result_size,
517 rscreen->info.min_alloc_size);
518
519 /* Queries are normally read by the CPU after
520 * being written by the GPU, hence staging is probably a good
521 * usage pattern.
522 */
523 struct r600_resource *buf = (struct r600_resource*)
524 pipe_buffer_create(&rscreen->b, 0,
525 PIPE_USAGE_STAGING, buf_size);
526 if (!buf)
527 return NULL;
528
529 if (!query->ops->prepare_buffer(rscreen, query, buf)) {
530 r600_resource_reference(&buf, NULL);
531 return NULL;
532 }
533
534 return buf;
535 }
536
537 static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
538 struct r600_query_hw *query,
539 struct r600_resource *buffer)
540 {
541 /* Callers ensure that the buffer is currently unused by the GPU. */
542 uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
543 PIPE_TRANSFER_WRITE |
544 PIPE_TRANSFER_UNSYNCHRONIZED);
545 if (!results)
546 return false;
547
548 memset(results, 0, buffer->b.b.width0);
549
550 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
551 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
552 unsigned max_rbs = rscreen->info.num_render_backends;
553 unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
554 unsigned num_results;
555 unsigned i, j;
556
557 /* Set top bits for unused backends, so their zeroed results still pass the status-bit check in r600_query_read_result(). */
558 num_results = buffer->b.b.width0 / query->result_size;
559 for (j = 0; j < num_results; j++) {
560 for (i = 0; i < max_rbs; i++) {
561 if (!(enabled_rb_mask & (1<<i))) {
562 results[(i * 4)+1] = 0x80000000;
563 results[(i * 4)+3] = 0x80000000;
564 }
565 }
566 results += 4 * max_rbs;
567 }
568 }
569
570 return true;
571 }
572
573 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
574 struct r600_query *rquery,
575 bool wait,
576 enum pipe_query_value_type result_type,
577 int index,
578 struct pipe_resource *resource,
579 unsigned offset);
580
581 static struct r600_query_ops query_hw_ops = {
582 .destroy = r600_query_hw_destroy,
583 .begin = r600_query_hw_begin,
584 .end = r600_query_hw_end,
585 .get_result = r600_query_hw_get_result,
586 .get_result_resource = r600_query_hw_get_result_resource,
587 };
588
589 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
590 struct r600_query_hw *query,
591 struct r600_resource *buffer,
592 uint64_t va);
593 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
594 struct r600_query_hw *query,
595 struct r600_resource *buffer,
596 uint64_t va);
597 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
598 struct r600_query_hw *, void *buffer,
599 union pipe_query_result *result);
600 static void r600_query_hw_clear_result(struct r600_query_hw *,
601 union pipe_query_result *);
602
603 static struct r600_query_hw_ops query_hw_default_hw_ops = {
604 .prepare_buffer = r600_query_hw_prepare_buffer,
605 .emit_start = r600_query_hw_do_emit_start,
606 .emit_stop = r600_query_hw_do_emit_stop,
607 .clear_result = r600_query_hw_clear_result,
608 .add_result = r600_query_hw_add_result,
609 };
610
611 bool r600_query_hw_init(struct r600_common_screen *rscreen,
612 struct r600_query_hw *query)
613 {
614 query->buffer.buf = r600_new_query_buffer(rscreen, query);
615 if (!query->buffer.buf)
616 return false;
617
618 return true;
619 }
620
621 static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
622 unsigned query_type,
623 unsigned index)
624 {
625 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
626 if (!query)
627 return NULL;
628
629 query->b.type = query_type;
630 query->b.ops = &query_hw_ops;
631 query->ops = &query_hw_default_hw_ops;
632
633 switch (query_type) {
634 case PIPE_QUERY_OCCLUSION_COUNTER:
635 case PIPE_QUERY_OCCLUSION_PREDICATE:
636 query->result_size = 16 * rscreen->info.num_render_backends;
637 query->result_size += 16; /* for the fence + alignment */
638 query->num_cs_dw_begin = 6;
639 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
640 break;
641 case PIPE_QUERY_TIME_ELAPSED:
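		/* 8 bytes begin timestamp + 8 bytes end + 8 bytes fence;
		 * see the matching offsets in r600_get_hw_query_params(). */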
642 query->result_size = 24;
643 query->num_cs_dw_begin = 8;
644 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
645 break;
646 case PIPE_QUERY_TIMESTAMP:
647 query->result_size = 16;
648 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
649 query->flags = R600_QUERY_HW_FLAG_NO_START;
650 break;
651 case PIPE_QUERY_PRIMITIVES_EMITTED:
652 case PIPE_QUERY_PRIMITIVES_GENERATED:
653 case PIPE_QUERY_SO_STATISTICS:
654 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
655 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
656 query->result_size = 32;
657 query->num_cs_dw_begin = 6;
658 query->num_cs_dw_end = 6;
659 query->stream = index;
660 break;
661 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
662 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
663 query->result_size = 32 * R600_MAX_STREAMS;
664 query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
665 query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
666 break;
667 case PIPE_QUERY_PIPELINE_STATISTICS:
668 /* 11 values on EG, 8 on R600. */
669 query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
670 query->result_size += 8; /* for the fence + alignment */
671 query->num_cs_dw_begin = 6;
672 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
673 break;
674 default:
675 assert(0);
676 FREE(query);
677 return NULL;
678 }
679
680 if (!r600_query_hw_init(rscreen, query)) {
681 FREE(query);
682 return NULL;
683 }
684
685 return (struct pipe_query *)query;
686 }
687
688 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
689 unsigned type, int diff)
690 {
691 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
692 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
693 bool old_enable = rctx->num_occlusion_queries != 0;
694 bool old_perfect_enable =
695 rctx->num_perfect_occlusion_queries != 0;
696 bool enable, perfect_enable;
697
698 rctx->num_occlusion_queries += diff;
699 assert(rctx->num_occlusion_queries >= 0);
700
701 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
702 rctx->num_perfect_occlusion_queries += diff;
703 assert(rctx->num_perfect_occlusion_queries >= 0);
704 }
705
706 enable = rctx->num_occlusion_queries != 0;
707 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
708
709 if (enable != old_enable || perfect_enable != old_perfect_enable) {
710 struct r600_context *ctx = (struct r600_context*)rctx;
711 r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
712 }
713 }
714 }
715
716 static unsigned event_type_for_stream(unsigned stream)
717 {
718 switch (stream) {
719 default:
720 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
721 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
722 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
723 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
724 }
725 }
726
727 static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
728 unsigned stream)
729 {
730 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
731 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
732 radeon_emit(cs, va);
733 radeon_emit(cs, va >> 32);
734 }
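/* EVENT_WRITE with EVENT_INDEX(3) samples the streamout statistics of the
 * given stream to memory at va: the 64-bit pair NumPrimitivesWritten and
 * PrimitiveStorageNeeded (see r600_query_hw_add_result()). */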
735
736 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
737 struct r600_query_hw *query,
738 struct r600_resource *buffer,
739 uint64_t va)
740 {
741 struct radeon_winsys_cs *cs = ctx->gfx.cs;
742
743 switch (query->b.type) {
744 case PIPE_QUERY_OCCLUSION_COUNTER:
745 case PIPE_QUERY_OCCLUSION_PREDICATE:
746 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
747 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
748 radeon_emit(cs, va);
749 radeon_emit(cs, va >> 32);
750 break;
751 case PIPE_QUERY_PRIMITIVES_EMITTED:
752 case PIPE_QUERY_PRIMITIVES_GENERATED:
753 case PIPE_QUERY_SO_STATISTICS:
754 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
755 emit_sample_streamout(cs, va, query->stream);
756 break;
757 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
758 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
759 emit_sample_streamout(cs, va + 32 * stream, stream);
760 break;
761 case PIPE_QUERY_TIME_ELAPSED:
762 /* Write the timestamp after the last draw is done.
763 * (bottom-of-pipe)
764 */
765 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
766 0, EOP_DATA_SEL_TIMESTAMP,
767 NULL, va, 0, query->b.type);
768 break;
769 case PIPE_QUERY_PIPELINE_STATISTICS:
770 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
771 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
772 radeon_emit(cs, va);
773 radeon_emit(cs, va >> 32);
774 break;
775 default:
776 assert(0);
777 }
778 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
779 RADEON_PRIO_QUERY);
780 }
781
782 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
783 struct r600_query_hw *query)
784 {
785 uint64_t va;
786
787 if (!query->buffer.buf)
788 return; /* previous buffer allocation failure */
789
790 r600_update_occlusion_query_state(ctx, query->b.type, 1);
791 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
792
793 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
794 true);
795
796 /* Get a new query buffer if needed. */
797 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
798 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
if (!qbuf)
	return;
799 *qbuf = query->buffer;
800 query->buffer.results_end = 0;
801 query->buffer.previous = qbuf;
802 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
803 if (!query->buffer.buf)
804 return;
805 }
806
807 /* emit begin query */
808 va = query->buffer.buf->gpu_address + query->buffer.results_end;
809
810 query->ops->emit_start(ctx, query, query->buffer.buf, va);
811
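	/* Account for the CS space an eventual suspend needs: a flush while
	 * this query is active must still be able to emit the "stop" half,
	 * hence num_cs_dw_end dwords per active query. */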
812 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
813 }
814
815 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
816 struct r600_query_hw *query,
817 struct r600_resource *buffer,
818 uint64_t va)
819 {
820 struct radeon_winsys_cs *cs = ctx->gfx.cs;
821 uint64_t fence_va = 0;
822
823 switch (query->b.type) {
824 case PIPE_QUERY_OCCLUSION_COUNTER:
825 case PIPE_QUERY_OCCLUSION_PREDICATE:
826 va += 8;
827 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
828 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
829 radeon_emit(cs, va);
830 radeon_emit(cs, va >> 32);
831
832 fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
833 break;
834 case PIPE_QUERY_PRIMITIVES_EMITTED:
835 case PIPE_QUERY_PRIMITIVES_GENERATED:
836 case PIPE_QUERY_SO_STATISTICS:
837 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
838 va += 16;
839 emit_sample_streamout(cs, va, query->stream);
840 break;
841 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
842 va += 16;
843 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
844 emit_sample_streamout(cs, va + 32 * stream, stream);
845 break;
846 case PIPE_QUERY_TIME_ELAPSED:
847 va += 8;
848 /* fall through */
849 case PIPE_QUERY_TIMESTAMP:
850 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
851 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
852 0, query->b.type);
853 fence_va = va + 8;
854 break;
855 case PIPE_QUERY_PIPELINE_STATISTICS: {
856 unsigned sample_size = (query->result_size - 8) / 2;
857
858 va += sample_size;
859 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
860 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
861 radeon_emit(cs, va);
862 radeon_emit(cs, va >> 32);
863
864 fence_va = va + sample_size;
865 break;
866 }
867 default:
868 assert(0);
869 }
870 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
871 RADEON_PRIO_QUERY);
872
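	/* Write a 32-bit fence with the top bit set after the end-of-pipe
	 * event; the result-resource path waits on this dword via
	 * r600_gfx_wait_fence(), and the result shader tests its top bit
	 * for availability. */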
873 if (fence_va)
874 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
875 EOP_DATA_SEL_VALUE_32BIT,
876 query->buffer.buf, fence_va, 0x80000000,
877 query->b.type);
878 }
879
880 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
881 struct r600_query_hw *query)
882 {
883 uint64_t va;
884
885 if (!query->buffer.buf)
886 return; /* previous buffer allocation failure */
887
888 /* Queries that have a begin already reserved the CS space for this end in begin_query; only begin-less queries reserve it here. */
889 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
890 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
891 }
892
893 /* emit end query */
894 va = query->buffer.buf->gpu_address + query->buffer.results_end;
895
896 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
897
898 query->buffer.results_end += query->result_size;
899
900 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
901 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
902
903 r600_update_occlusion_query_state(ctx, query->b.type, -1);
904 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
905 }
906
907 static void emit_set_predicate(struct r600_common_context *ctx,
908 struct r600_resource *buf, uint64_t va,
909 uint32_t op)
910 {
911 struct radeon_winsys_cs *cs = ctx->gfx.cs;
912
913 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
914 radeon_emit(cs, va);
915 radeon_emit(cs, op | ((va >> 32) & 0xFF));
916 r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
917 RADEON_PRIO_QUERY);
918 }
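/* PKT3_SET_PREDICATION takes the 40-bit GPU address of the result (low
 * 32 bits, then the high 8 bits packed into the op dword) together with
 * the predication operation and its wait/continue flags. */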
919
920 static void r600_emit_query_predication(struct r600_common_context *ctx,
921 struct r600_atom *atom)
922 {
923 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
924 struct r600_query_buffer *qbuf;
925 uint32_t op;
926 bool flag_wait, invert;
927
928 if (!query)
929 return;
930
931 invert = ctx->render_cond_invert;
932 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
933 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
934
935 if (query->workaround_buf) {
936 op = PRED_OP(PREDICATION_OP_BOOL64);
937 } else {
938 switch (query->b.type) {
939 case PIPE_QUERY_OCCLUSION_COUNTER:
940 case PIPE_QUERY_OCCLUSION_PREDICATE:
941 op = PRED_OP(PREDICATION_OP_ZPASS);
942 break;
943 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
944 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
945 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
946 invert = !invert;
947 break;
948 default:
949 assert(0);
950 return;
951 }
952 }
953
954 /* if true then invert, see GL_ARB_conditional_render_inverted */
955 if (invert)
956 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
957 else
958 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */
959
960 /* Use the value written by compute shader as a workaround. Note that
961 * the wait flag does not apply in this predication mode.
962 *
963 * The shader outputs the result value to L2. Workarounds only affect VI
964 * and later, where the CP reads data from L2, so we don't need an
965 * additional flush.
966 */
967 if (query->workaround_buf) {
968 uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
969 emit_set_predicate(ctx, query->workaround_buf, va, op);
970 return;
971 }
972
973 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
974
975 /* emit predicate packets for all data blocks */
976 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
977 unsigned results_base = 0;
978 uint64_t va_base = qbuf->buf->gpu_address;
979
980 while (results_base < qbuf->results_end) {
981 uint64_t va = va_base + results_base;
982
983 if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
984 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
985 emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
986
987 /* set CONTINUE bit for all packets except the first */
988 op |= PREDICATION_CONTINUE;
989 }
990 } else {
991 emit_set_predicate(ctx, qbuf->buf, va, op);
992 op |= PREDICATION_CONTINUE;
993 }
994
995 results_base += query->result_size;
996 }
997 }
998 }
999
1000 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
1001 {
1002 struct r600_common_screen *rscreen =
1003 (struct r600_common_screen *)ctx->screen;
1004
1005 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
1006 query_type == PIPE_QUERY_GPU_FINISHED ||
1007 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
1008 return r600_query_sw_create(query_type);
1009
1010 return r600_query_hw_create(rscreen, query_type, index);
1011 }
1012
1013 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
1014 {
1015 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1016 struct r600_query *rquery = (struct r600_query *)query;
1017
1018 rquery->ops->destroy(rctx->screen, rquery);
1019 }
1020
1021 static boolean r600_begin_query(struct pipe_context *ctx,
1022 struct pipe_query *query)
1023 {
1024 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1025 struct r600_query *rquery = (struct r600_query *)query;
1026
1027 return rquery->ops->begin(rctx, rquery);
1028 }
1029
1030 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
1031 struct r600_query_hw *query)
1032 {
1033 struct r600_query_buffer *prev = query->buffer.previous;
1034
1035 /* Discard the old query buffers. */
1036 while (prev) {
1037 struct r600_query_buffer *qbuf = prev;
1038 prev = prev->previous;
1039 r600_resource_reference(&qbuf->buf, NULL);
1040 FREE(qbuf);
1041 }
1042
1043 query->buffer.results_end = 0;
1044 query->buffer.previous = NULL;
1045
1046 /* Obtain a new buffer if the current one can't be mapped without a stall. */
1047 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1048 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1049 r600_resource_reference(&query->buffer.buf, NULL);
1050 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
1051 } else {
1052 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
1053 r600_resource_reference(&query->buffer.buf, NULL);
1054 }
1055 }
1056
1057 bool r600_query_hw_begin(struct r600_common_context *rctx,
1058 struct r600_query *rquery)
1059 {
1060 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1061
1062 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
1063 assert(0);
1064 return false;
1065 }
1066
1067 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
1068 r600_query_hw_reset_buffers(rctx, query);
1069
1070 r600_resource_reference(&query->workaround_buf, NULL);
1071
1072 r600_query_hw_emit_start(rctx, query);
1073 if (!query->buffer.buf)
1074 return false;
1075
1076 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1077 return true;
1078 }
1079
1080 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1081 {
1082 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1083 struct r600_query *rquery = (struct r600_query *)query;
1084
1085 return rquery->ops->end(rctx, rquery);
1086 }
1087
1088 bool r600_query_hw_end(struct r600_common_context *rctx,
1089 struct r600_query *rquery)
1090 {
1091 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1092
1093 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1094 r600_query_hw_reset_buffers(rctx, query);
1095
1096 r600_query_hw_emit_stop(rctx, query);
1097
1098 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1099 LIST_DELINIT(&query->list);
1100
1101 if (!query->buffer.buf)
1102 return false;
1103
1104 return true;
1105 }
1106
1107 static void r600_get_hw_query_params(struct r600_common_context *rctx,
1108 struct r600_query_hw *rquery, int index,
1109 struct r600_hw_query_params *params)
1110 {
1111 unsigned max_rbs = rctx->screen->info.num_render_backends;
1112
1113 params->pair_stride = 0;
1114 params->pair_count = 1;
1115
1116 switch (rquery->b.type) {
1117 case PIPE_QUERY_OCCLUSION_COUNTER:
1118 case PIPE_QUERY_OCCLUSION_PREDICATE:
1119 params->start_offset = 0;
1120 params->end_offset = 8;
1121 params->fence_offset = max_rbs * 16;
1122 params->pair_stride = 16;
1123 params->pair_count = max_rbs;
1124 break;
1125 case PIPE_QUERY_TIME_ELAPSED:
1126 params->start_offset = 0;
1127 params->end_offset = 8;
1128 params->fence_offset = 16;
1129 break;
1130 case PIPE_QUERY_TIMESTAMP:
1131 params->start_offset = 0;
1132 params->end_offset = 0;
1133 params->fence_offset = 8;
1134 break;
1135 case PIPE_QUERY_PRIMITIVES_EMITTED:
1136 params->start_offset = 8;
1137 params->end_offset = 24;
1138 params->fence_offset = params->end_offset + 4;
1139 break;
1140 case PIPE_QUERY_PRIMITIVES_GENERATED:
1141 params->start_offset = 0;
1142 params->end_offset = 16;
1143 params->fence_offset = params->end_offset + 4;
1144 break;
1145 case PIPE_QUERY_SO_STATISTICS:
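		/* index 0 selects NumPrimitivesWritten (byte offset 8 within
		 * the sample), index 1 PrimitiveStorageNeeded (offset 0). */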
1146 params->start_offset = 8 - index * 8;
1147 params->end_offset = 24 - index * 8;
1148 params->fence_offset = params->end_offset + 4;
1149 break;
1150 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1151 params->pair_count = R600_MAX_STREAMS;
1152 params->pair_stride = 32;
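		/* fall through */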
1153 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1154 params->start_offset = 0;
1155 params->end_offset = 16;
1156
1157 /* We can re-use the high dword of the last 64-bit value as a
1158 * fence: it is initialized as 0, and the high bit is set by
1159 * the write of the streamout stats event.
1160 */
1161 params->fence_offset = rquery->result_size - 4;
1162 break;
1163 case PIPE_QUERY_PIPELINE_STATISTICS:
1164 {
1165 /* Offsets apply to EG+ */
1166 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1167 params->start_offset = offsets[index];
1168 params->end_offset = 88 + offsets[index];
1169 params->fence_offset = 2 * 88;
1170 break;
1171 }
1172 default:
1173 unreachable("r600_get_hw_query_params unsupported");
1174 }
1175 }
1176
1177 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
1178 bool test_status_bit)
1179 {
1180 uint32_t *current_result = (uint32_t*)map;
1181 uint64_t start, end;
1182
1183 start = (uint64_t)current_result[start_index] |
1184 (uint64_t)current_result[start_index+1] << 32;
1185 end = (uint64_t)current_result[end_index] |
1186 (uint64_t)current_result[end_index+1] << 32;
1187
1188 if (!test_status_bit ||
1189 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1190 return end - start;
1191 }
1192 return 0;
1193 }
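/* Example: one RB's slot of an occlusion query holds the four dwords
 * {begin_lo, begin_hi, end_lo, end_hi}, so its ZPASS count is read with
 * r600_query_read_result(map, 0, 2, true); test_status_bit checks bit 63
 * of both 64-bit values, which is only set once the RB has written a
 * valid result. */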
1194
1195 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
1196 struct r600_query_hw *query,
1197 void *buffer,
1198 union pipe_query_result *result)
1199 {
1200 unsigned max_rbs = rscreen->info.num_render_backends;
1201
1202 switch (query->b.type) {
1203 case PIPE_QUERY_OCCLUSION_COUNTER: {
1204 for (unsigned i = 0; i < max_rbs; ++i) {
1205 unsigned results_base = i * 16;
1206 result->u64 +=
1207 r600_query_read_result(buffer + results_base, 0, 2, true);
1208 }
1209 break;
1210 }
1211 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1212 for (unsigned i = 0; i < max_rbs; ++i) {
1213 unsigned results_base = i * 16;
1214 result->b = result->b ||
1215 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1216 }
1217 break;
1218 }
1219 case PIPE_QUERY_TIME_ELAPSED:
1220 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1221 break;
1222 case PIPE_QUERY_TIMESTAMP:
1223 result->u64 = *(uint64_t*)buffer;
1224 break;
1225 case PIPE_QUERY_PRIMITIVES_EMITTED:
1226 /* SAMPLE_STREAMOUTSTATS stores this structure:
1227 * {
1228 * u64 NumPrimitivesWritten;
1229 * u64 PrimitiveStorageNeeded;
1230 * }
1231 * We only need NumPrimitivesWritten here. */
1232 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1233 break;
1234 case PIPE_QUERY_PRIMITIVES_GENERATED:
1235 /* Here we read PrimitiveStorageNeeded. */
1236 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1237 break;
1238 case PIPE_QUERY_SO_STATISTICS:
1239 result->so_statistics.num_primitives_written +=
1240 r600_query_read_result(buffer, 2, 6, true);
1241 result->so_statistics.primitives_storage_needed +=
1242 r600_query_read_result(buffer, 0, 4, true);
1243 break;
1244 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1245 result->b = result->b ||
1246 r600_query_read_result(buffer, 2, 6, true) !=
1247 r600_query_read_result(buffer, 0, 4, true);
1248 break;
1249 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1250 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
1251 result->b = result->b ||
1252 r600_query_read_result(buffer, 2, 6, true) !=
1253 r600_query_read_result(buffer, 0, 4, true);
1254 buffer = (char *)buffer + 32;
1255 }
1256 break;
1257 case PIPE_QUERY_PIPELINE_STATISTICS:
1258 if (rscreen->chip_class >= EVERGREEN) {
1259 result->pipeline_statistics.ps_invocations +=
1260 r600_query_read_result(buffer, 0, 22, false);
1261 result->pipeline_statistics.c_primitives +=
1262 r600_query_read_result(buffer, 2, 24, false);
1263 result->pipeline_statistics.c_invocations +=
1264 r600_query_read_result(buffer, 4, 26, false);
1265 result->pipeline_statistics.vs_invocations +=
1266 r600_query_read_result(buffer, 6, 28, false);
1267 result->pipeline_statistics.gs_invocations +=
1268 r600_query_read_result(buffer, 8, 30, false);
1269 result->pipeline_statistics.gs_primitives +=
1270 r600_query_read_result(buffer, 10, 32, false);
1271 result->pipeline_statistics.ia_primitives +=
1272 r600_query_read_result(buffer, 12, 34, false);
1273 result->pipeline_statistics.ia_vertices +=
1274 r600_query_read_result(buffer, 14, 36, false);
1275 result->pipeline_statistics.hs_invocations +=
1276 r600_query_read_result(buffer, 16, 38, false);
1277 result->pipeline_statistics.ds_invocations +=
1278 r600_query_read_result(buffer, 18, 40, false);
1279 result->pipeline_statistics.cs_invocations +=
1280 r600_query_read_result(buffer, 20, 42, false);
1281 } else {
1282 result->pipeline_statistics.ps_invocations +=
1283 r600_query_read_result(buffer, 0, 16, false);
1284 result->pipeline_statistics.c_primitives +=
1285 r600_query_read_result(buffer, 2, 18, false);
1286 result->pipeline_statistics.c_invocations +=
1287 r600_query_read_result(buffer, 4, 20, false);
1288 result->pipeline_statistics.vs_invocations +=
1289 r600_query_read_result(buffer, 6, 22, false);
1290 result->pipeline_statistics.gs_invocations +=
1291 r600_query_read_result(buffer, 8, 24, false);
1292 result->pipeline_statistics.gs_primitives +=
1293 r600_query_read_result(buffer, 10, 26, false);
1294 result->pipeline_statistics.ia_primitives +=
1295 r600_query_read_result(buffer, 12, 28, false);
1296 result->pipeline_statistics.ia_vertices +=
1297 r600_query_read_result(buffer, 14, 30, false);
1298 }
1299 #if 0 /* for testing */
1300 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1301 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1302 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1303 result->pipeline_statistics.ia_vertices,
1304 result->pipeline_statistics.ia_primitives,
1305 result->pipeline_statistics.vs_invocations,
1306 result->pipeline_statistics.hs_invocations,
1307 result->pipeline_statistics.ds_invocations,
1308 result->pipeline_statistics.gs_invocations,
1309 result->pipeline_statistics.gs_primitives,
1310 result->pipeline_statistics.c_invocations,
1311 result->pipeline_statistics.c_primitives,
1312 result->pipeline_statistics.ps_invocations,
1313 result->pipeline_statistics.cs_invocations);
1314 #endif
1315 break;
1316 default:
1317 assert(0);
1318 }
1319 }
1320
1321 static boolean r600_get_query_result(struct pipe_context *ctx,
1322 struct pipe_query *query, boolean wait,
1323 union pipe_query_result *result)
1324 {
1325 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1326 struct r600_query *rquery = (struct r600_query *)query;
1327
1328 return rquery->ops->get_result(rctx, rquery, wait, result);
1329 }
1330
1331 static void r600_get_query_result_resource(struct pipe_context *ctx,
1332 struct pipe_query *query,
1333 boolean wait,
1334 enum pipe_query_value_type result_type,
1335 int index,
1336 struct pipe_resource *resource,
1337 unsigned offset)
1338 {
1339 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1340 struct r600_query *rquery = (struct r600_query *)query;
1341
1342 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1343 resource, offset);
1344 }
1345
1346 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1347 union pipe_query_result *result)
1348 {
1349 util_query_clear_result(result, query->b.type);
1350 }
1351
1352 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1353 struct r600_query *rquery,
1354 bool wait, union pipe_query_result *result)
1355 {
1356 struct r600_common_screen *rscreen = rctx->screen;
1357 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1358 struct r600_query_buffer *qbuf;
1359
1360 query->ops->clear_result(query, result);
1361
1362 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1363 unsigned usage = PIPE_TRANSFER_READ |
1364 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1365 unsigned results_base = 0;
1366 void *map;
1367
1368 if (rquery->b.flushed)
1369 map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1370 else
1371 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);
1372
1373 if (!map)
1374 return false;
1375
1376 while (results_base != qbuf->results_end) {
1377 query->ops->add_result(rscreen, query, map + results_base,
1378 result);
1379 results_base += query->result_size;
1380 }
1381 }
1382
1383 /* Convert the time to expected units. */
1384 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1385 rquery->type == PIPE_QUERY_TIMESTAMP) {
1386 result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
1387 }
1388 return true;
1389 }
1390
1391 /* Create the compute shader that is used to collect the results.
1392 *
1393 * One compute grid with a single thread is launched for every query result
1394 * buffer. The thread (optionally) reads a previous summary buffer, then
1395 * accumulates data from the query result buffer, and writes the result either
1396 * to a summary buffer to be consumed by the next grid invocation or to the
1397 * user-supplied buffer.
1398 *
1399 * Data layout:
1400 *
1401 * CONST
1402 * 0.x = end_offset
1403 * 0.y = result_stride
1404 * 0.z = result_count
1405 * 0.w = bit field:
1406 * 1: read previously accumulated values
1407 * 2: write accumulated values for chaining
1408 * 4: write result available
1409 * 8: convert result to boolean (0/1)
1410 * 16: only read one dword and use that as result
1411 * 32: apply timestamp conversion
1412 * 64: store full 64 bits result
1413 * 128: store signed 32 bits result
1414 * 256: SO_OVERFLOW mode: take the difference of two successive half-pairs
1415 * 1.x = fence_offset
1416 * 1.y = pair_stride
1417 * 1.z = pair_count
1418 *
1419 * BUFFER[0] = query result buffer
1420 * BUFFER[1] = previous summary buffer
1421 * BUFFER[2] = next summary buffer or user-supplied buffer
1422 */
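/* For example (a sketch): resolving PIPE_QUERY_TIME_ELAPSED as
 * PIPE_QUERY_TYPE_U64 straight into the user buffer uses
 * config = 32 | 64 (timestamp conversion + 64-bit store), with bits 1
 * and 2 added on intermediate grids when results span several chained
 * query buffers. */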
1423 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1424 {
1425 /* TEMP[0].xy = accumulated result so far
1426 * TEMP[0].z = result not available
1427 *
1428 * TEMP[1].x = current result index
1429 * TEMP[1].y = current pair index
1430 */
1431 static const char text_tmpl[] =
1432 "COMP\n"
1433 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1434 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1435 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1436 "DCL BUFFER[0]\n"
1437 "DCL BUFFER[1]\n"
1438 "DCL BUFFER[2]\n"
1439 "DCL CONST[0][0..1]\n"
1440 "DCL TEMP[0..5]\n"
1441 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1442 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1443 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1444 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1445 "IMM[4] UINT32 {256, 0, 0, 0}\n"
1446
1447 "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
1448 "UIF TEMP[5]\n"
1449 /* Check result availability. */
1450 "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
1451 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1452 "MOV TEMP[1], TEMP[0].zzzz\n"
1453 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1454
1455 /* Load result if available. */
1456 "UIF TEMP[1]\n"
1457 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1458 "ENDIF\n"
1459 "ELSE\n"
1460 /* Load previously accumulated result if requested. */
1461 "MOV TEMP[0], IMM[0].xxxx\n"
1462 "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
1463 "UIF TEMP[4]\n"
1464 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1465 "ENDIF\n"
1466
1467 "MOV TEMP[1].x, IMM[0].xxxx\n"
1468 "BGNLOOP\n"
1469 /* Break if accumulated result so far is not available. */
1470 "UIF TEMP[0].zzzz\n"
1471 "BRK\n"
1472 "ENDIF\n"
1473
1474 /* Break if result_index >= result_count. */
1475 "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
1476 "UIF TEMP[5]\n"
1477 "BRK\n"
1478 "ENDIF\n"
1479
1480 /* Load fence and check result availability */
1481 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
1482 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1483 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1484 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1485 "UIF TEMP[0].zzzz\n"
1486 "BRK\n"
1487 "ENDIF\n"
1488
1489 "MOV TEMP[1].y, IMM[0].xxxx\n"
1490 "BGNLOOP\n"
1491 /* Load start and end. */
1492 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
1493 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
1494 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1495
1496 "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
1497 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
1498
1499 "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"
1500
1501 "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
1502 "UIF TEMP[5].zzzz\n"
1503 /* Load second start/end half-pair and
1504 * take the difference
1505 */
1506 "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
1507 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1508 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
1509
1510 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1511 "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
1512 "ENDIF\n"
1513
1514 "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"
1515
1516 /* Increment pair index */
1517 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1518 "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
1519 "UIF TEMP[5]\n"
1520 "BRK\n"
1521 "ENDIF\n"
1522 "ENDLOOP\n"
1523
1524 /* Increment result index */
1525 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1526 "ENDLOOP\n"
1527 "ENDIF\n"
1528
1529 "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
1530 "UIF TEMP[4]\n"
1531 /* Store accumulated data for chaining. */
1532 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1533 "ELSE\n"
1534 "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
1535 "UIF TEMP[4]\n"
1536 /* Store result availability. */
1537 "NOT TEMP[0].z, TEMP[0]\n"
1538 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1539 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1540
1541 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
1542 "UIF TEMP[4]\n"
1543 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1544 "ENDIF\n"
1545 "ELSE\n"
1546 /* Store result if it is available. */
1547 "NOT TEMP[4], TEMP[0].zzzz\n"
1548 "UIF TEMP[4]\n"
1549 /* Apply timestamp conversion */
1550 "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
1551 "UIF TEMP[4]\n"
1552 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1553 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1554 "ENDIF\n"
1555
1556 /* Convert to boolean */
1557 "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
1558 "UIF TEMP[4]\n"
1559 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
1560 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1561 "MOV TEMP[0].y, IMM[0].xxxx\n"
1562 "ENDIF\n"
1563
1564 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
1565 "UIF TEMP[4]\n"
1566 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1567 "ELSE\n"
1568 /* Clamping */
1569 "UIF TEMP[0].yyyy\n"
1570 "MOV TEMP[0].x, IMM[0].wwww\n"
1571 "ENDIF\n"
1572
1573 "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
1574 "UIF TEMP[4]\n"
1575 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1576 "ENDIF\n"
1577
1578 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1579 "ENDIF\n"
1580 "ENDIF\n"
1581 "ENDIF\n"
1582 "ENDIF\n"
1583
1584 "END\n";
1585
1586 char text[sizeof(text_tmpl) + 32];
1587 struct tgsi_token tokens[1024];
1588 struct pipe_compute_state state = {};
1589
1590 /* Hard code the frequency into the shader so that the backend can
1591 * use the full range of optimizations for divide-by-constant.
1592 */
1593 snprintf(text, sizeof(text), text_tmpl,
1594 rctx->screen->info.clock_crystal_freq);
1595
1596 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1597 assert(false);
1598 return;
1599 }
1600
1601 state.ir_type = PIPE_SHADER_IR_TGSI;
1602 state.prog = tokens;
1603
1604 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1605 }
1606
1607 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1608 struct r600_qbo_state *st)
1609 {
1610 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1611
1612 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1613 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1614
1615 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1616 for (unsigned i = 0; i < 3; ++i)
1617 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1618 }
1619
1620 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1621 struct r600_query *rquery,
1622 bool wait,
1623 enum pipe_query_value_type result_type,
1624 int index,
1625 struct pipe_resource *resource,
1626 unsigned offset)
1627 {
1628 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1629 struct r600_query_buffer *qbuf;
1630 struct r600_query_buffer *qbuf_prev;
1631 struct pipe_resource *tmp_buffer = NULL;
1632 unsigned tmp_buffer_offset = 0;
1633 struct r600_qbo_state saved_state = {};
1634 struct pipe_grid_info grid = {};
1635 struct pipe_constant_buffer constant_buffer = {};
1636 struct pipe_shader_buffer ssbo[3];
1637 struct r600_hw_query_params params;
1638 struct {
1639 uint32_t end_offset;
1640 uint32_t result_stride;
1641 uint32_t result_count;
1642 uint32_t config;
1643 uint32_t fence_offset;
1644 uint32_t pair_stride;
1645 uint32_t pair_count;
1646 } consts;
1647
1648 if (!rctx->query_result_shader) {
1649 r600_create_query_result_shader(rctx);
1650 if (!rctx->query_result_shader)
1651 return;
1652 }
1653
1654 if (query->buffer.previous) {
1655 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1656 &tmp_buffer_offset, &tmp_buffer);
1657 if (!tmp_buffer)
1658 return;
1659 }
1660
1661 rctx->save_qbo_state(&rctx->b, &saved_state);
1662
1663 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1664 consts.end_offset = params.end_offset - params.start_offset;
1665 consts.fence_offset = params.fence_offset - params.start_offset;
1666 consts.result_stride = query->result_size;
1667 consts.pair_stride = params.pair_stride;
1668 consts.pair_count = params.pair_count;
1669
1670 constant_buffer.buffer_size = sizeof(consts);
1671 constant_buffer.user_buffer = &consts;
1672
1673 ssbo[1].buffer = tmp_buffer;
1674 ssbo[1].buffer_offset = tmp_buffer_offset;
1675 ssbo[1].buffer_size = 16;
1676
1677 ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}
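
	/* Summary of the config bits set above and in the loop below
	 * (descriptive note; the TGSI template earlier in this file is the
	 * authoritative definition):
	 *    1: read previously accumulated values (chained buffer)
	 *    2: write accumulated values for the next launch
	 *    4: return availability instead of the value (index < 0)
	 *    8: boolean (predicate) result
	 *   16: read only the last dword pair (timestamp path)
	 *   32: apply the tick-to-nanosecond timestamp conversion
	 *   64: store a full 64-bit result
	 *  128: store a signed 32-bit result
	 *  256: stream-overflow mode: diff two successive half-pairs
	 */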

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
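
/* For context (illustrative sketch, not code from this file): state trackers
 * reach the function above through pipe_context::get_query_result_resource,
 * e.g. when implementing GL_ARB_query_buffer_object:
 *
 *	ctx->get_query_result_resource(ctx, query, wait,
 *				       PIPE_QUERY_TYPE_U64, 0,
 *				       qbo, write_offset);
 *
 * where "qbo" and "write_offset" stand in for the destination buffer and
 * byte offset chosen by the caller.
 */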

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}
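
	/* Sizing note (added for clarity): each stored result is assumed to
	 * cost one SET_PREDICATION packet (3 dwords) plus a buffer relocation
	 * (2 dwords), hence the factor of 5; SO_OVERFLOW_ANY_PREDICATE repeats
	 * the whole sequence once per stream, matching the emit path in
	 * r600_emit_query_predication.
	 */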

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
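
/* Note (added for clarity): suspend/resume bracket command-stream flushes.
 * Queries that are still active when the CS is flushed get a stop event in
 * the old IB via r600_suspend_queries and a fresh start event in the new IB
 * via r600_resume_queries, so their results accumulate across the flush.
 */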

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/* if the backend_map query is supported by the kernel */
	if (rscreen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
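		/* Worked example (illustrative numbers): on an EVERGREEN-class
		 * part with num_tile_pipes = 2 and backend_map = 0x10, the
		 * first 4-bit item is 0 (pipe 0 -> RB0) and the second is 1
		 * (pipe 1 -> RB1), giving mask = 0x3.
		 */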
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise, use a fallback path for older kernels */

	/* create a buffer for the ZPASS_DONE event data: one 16-byte slot per RB */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize the buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze the results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask)
		rscreen->info.enabled_rb_mask = mask;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
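
/* For illustration, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to
 *
 *	{ .name = "draw-calls",
 *	  .query_type = R600_QUERY_DRAW_CALLS,
 *	  .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *	  .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *	  .group_id = ~(unsigned)0 }
 *
 * i.e. an ungrouped query; XG additionally files the entry under a
 * R600_QUERY_GROUP_* group id.
 */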

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
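
/* Typical enumeration by a frontend such as GALLIUM_HUD (illustrative
 * sketch, not code from this driver): calling with info == NULL returns the
 * total count, then each index is queried in turn:
 *
 *	int n = screen->get_driver_query_info(screen, 0, NULL);
 *	for (int i = 0; i < n; i++) {
 *		struct pipe_driver_query_info info;
 *		screen->get_driver_query_info(screen, i, &info);
 *		// use info.name, info.type, info.max_value, ...
 *	}
 */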

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}