mesa.git: src/gallium/drivers/r600/r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_pipe.h"
27 #include "r600_cs.h"
28 #include "util/u_memory.h"
29 #include "util/u_upload_mgr.h"
30 #include "util/os_time.h"
31 #include "tgsi/tgsi_text.h"
32
33 #define R600_MAX_STREAMS 4
34
35 struct r600_hw_query_params {
36 unsigned start_offset;
37 unsigned end_offset;
38 unsigned fence_offset;
39 unsigned pair_stride;
40 unsigned pair_count;
41 };
42
43 /* Queries without buffer handling or suspend/resume. */
44 struct r600_query_sw {
45 struct r600_query b;
46
47 uint64_t begin_result;
48 uint64_t end_result;
49
50 uint64_t begin_time;
51 uint64_t end_time;
52
53 /* Fence for GPU_FINISHED. */
54 struct pipe_fence_handle *fence;
55 };
56
57 static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
58 struct r600_query *rquery)
59 {
60 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
61
62 rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
63 FREE(query);
64 }
65
66 static enum radeon_value_id winsys_id_from_type(unsigned type)
67 {
68 switch (type) {
69 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
70 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
71 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
72 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
73 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
74 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
75 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
76 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
77 case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
78 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
79 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
80 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
81 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
82 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
83 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
84 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
85 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
86 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
87 case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
88 default: unreachable("query type does not correspond to winsys id");
89 }
90 }
91
92 static bool r600_query_sw_begin(struct r600_common_context *rctx,
93 struct r600_query *rquery)
94 {
95 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
96 enum radeon_value_id ws_id;
97
98 switch(query->b.type) {
99 case PIPE_QUERY_TIMESTAMP_DISJOINT:
100 case PIPE_QUERY_GPU_FINISHED:
101 break;
102 case R600_QUERY_DRAW_CALLS:
103 query->begin_result = rctx->num_draw_calls;
104 break;
105 case R600_QUERY_DECOMPRESS_CALLS:
106 query->begin_result = rctx->num_decompress_calls;
107 break;
108 case R600_QUERY_MRT_DRAW_CALLS:
109 query->begin_result = rctx->num_mrt_draw_calls;
110 break;
111 case R600_QUERY_PRIM_RESTART_CALLS:
112 query->begin_result = rctx->num_prim_restart_calls;
113 break;
114 case R600_QUERY_SPILL_DRAW_CALLS:
115 query->begin_result = rctx->num_spill_draw_calls;
116 break;
117 case R600_QUERY_COMPUTE_CALLS:
118 query->begin_result = rctx->num_compute_calls;
119 break;
120 case R600_QUERY_SPILL_COMPUTE_CALLS:
121 query->begin_result = rctx->num_spill_compute_calls;
122 break;
123 case R600_QUERY_DMA_CALLS:
124 query->begin_result = rctx->num_dma_calls;
125 break;
126 case R600_QUERY_CP_DMA_CALLS:
127 query->begin_result = rctx->num_cp_dma_calls;
128 break;
129 case R600_QUERY_NUM_VS_FLUSHES:
130 query->begin_result = rctx->num_vs_flushes;
131 break;
132 case R600_QUERY_NUM_PS_FLUSHES:
133 query->begin_result = rctx->num_ps_flushes;
134 break;
135 case R600_QUERY_NUM_CS_FLUSHES:
136 query->begin_result = rctx->num_cs_flushes;
137 break;
138 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
139 query->begin_result = rctx->num_cb_cache_flushes;
140 break;
141 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
142 query->begin_result = rctx->num_db_cache_flushes;
143 break;
144 case R600_QUERY_NUM_L2_INVALIDATES:
145 query->begin_result = rctx->num_L2_invalidates;
146 break;
147 case R600_QUERY_NUM_L2_WRITEBACKS:
148 query->begin_result = rctx->num_L2_writebacks;
149 break;
150 case R600_QUERY_NUM_RESIDENT_HANDLES:
151 query->begin_result = rctx->num_resident_handles;
152 break;
153 case R600_QUERY_TC_OFFLOADED_SLOTS:
154 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
155 break;
156 case R600_QUERY_TC_DIRECT_SLOTS:
157 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
158 break;
159 case R600_QUERY_TC_NUM_SYNCS:
160 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
161 break;
162 case R600_QUERY_REQUESTED_VRAM:
163 case R600_QUERY_REQUESTED_GTT:
164 case R600_QUERY_MAPPED_VRAM:
165 case R600_QUERY_MAPPED_GTT:
166 case R600_QUERY_VRAM_USAGE:
167 case R600_QUERY_VRAM_VIS_USAGE:
168 case R600_QUERY_GTT_USAGE:
169 case R600_QUERY_GPU_TEMPERATURE:
170 case R600_QUERY_CURRENT_GPU_SCLK:
171 case R600_QUERY_CURRENT_GPU_MCLK:
172 case R600_QUERY_NUM_MAPPED_BUFFERS:
173 query->begin_result = 0;
174 break;
175 case R600_QUERY_BUFFER_WAIT_TIME:
176 case R600_QUERY_NUM_GFX_IBS:
177 case R600_QUERY_NUM_SDMA_IBS:
178 case R600_QUERY_NUM_BYTES_MOVED:
179 case R600_QUERY_NUM_EVICTIONS:
180 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
181 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
182 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
183 break;
184 }
185 case R600_QUERY_GFX_BO_LIST_SIZE:
186 ws_id = winsys_id_from_type(query->b.type);
187 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
188 query->begin_time = rctx->ws->query_value(rctx->ws,
189 RADEON_NUM_GFX_IBS);
190 break;
191 case R600_QUERY_CS_THREAD_BUSY:
192 ws_id = winsys_id_from_type(query->b.type);
193 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
194 query->begin_time = os_time_get_nano();
195 break;
196 case R600_QUERY_GALLIUM_THREAD_BUSY:
197 query->begin_result =
198 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
199 query->begin_time = os_time_get_nano();
200 break;
201 case R600_QUERY_GPU_LOAD:
202 case R600_QUERY_GPU_SHADERS_BUSY:
203 case R600_QUERY_GPU_TA_BUSY:
204 case R600_QUERY_GPU_GDS_BUSY:
205 case R600_QUERY_GPU_VGT_BUSY:
206 case R600_QUERY_GPU_IA_BUSY:
207 case R600_QUERY_GPU_SX_BUSY:
208 case R600_QUERY_GPU_WD_BUSY:
209 case R600_QUERY_GPU_BCI_BUSY:
210 case R600_QUERY_GPU_SC_BUSY:
211 case R600_QUERY_GPU_PA_BUSY:
212 case R600_QUERY_GPU_DB_BUSY:
213 case R600_QUERY_GPU_CP_BUSY:
214 case R600_QUERY_GPU_CB_BUSY:
215 case R600_QUERY_GPU_SDMA_BUSY:
216 case R600_QUERY_GPU_PFP_BUSY:
217 case R600_QUERY_GPU_MEQ_BUSY:
218 case R600_QUERY_GPU_ME_BUSY:
219 case R600_QUERY_GPU_SURF_SYNC_BUSY:
220 case R600_QUERY_GPU_CP_DMA_BUSY:
221 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
222 query->begin_result = r600_begin_counter(rctx->screen,
223 query->b.type);
224 break;
225 case R600_QUERY_NUM_COMPILATIONS:
226 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
227 break;
228 case R600_QUERY_NUM_SHADERS_CREATED:
229 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
230 break;
231 case R600_QUERY_NUM_SHADER_CACHE_HITS:
232 query->begin_result =
233 p_atomic_read(&rctx->screen->num_shader_cache_hits);
234 break;
235 case R600_QUERY_GPIN_ASIC_ID:
236 case R600_QUERY_GPIN_NUM_SIMD:
237 case R600_QUERY_GPIN_NUM_RB:
238 case R600_QUERY_GPIN_NUM_SPI:
239 case R600_QUERY_GPIN_NUM_SE:
240 break;
241 default:
242 unreachable("r600_query_sw_begin: bad query type");
243 }
244
245 return true;
246 }
247
248 static bool r600_query_sw_end(struct r600_common_context *rctx,
249 struct r600_query *rquery)
250 {
251 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
252 enum radeon_value_id ws_id;
253
254 switch(query->b.type) {
255 case PIPE_QUERY_TIMESTAMP_DISJOINT:
256 break;
257 case PIPE_QUERY_GPU_FINISHED:
258 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
259 break;
260 case R600_QUERY_DRAW_CALLS:
261 query->end_result = rctx->num_draw_calls;
262 break;
263 case R600_QUERY_DECOMPRESS_CALLS:
264 query->end_result = rctx->num_decompress_calls;
265 break;
266 case R600_QUERY_MRT_DRAW_CALLS:
267 query->end_result = rctx->num_mrt_draw_calls;
268 break;
269 case R600_QUERY_PRIM_RESTART_CALLS:
270 query->end_result = rctx->num_prim_restart_calls;
271 break;
272 case R600_QUERY_SPILL_DRAW_CALLS:
273 query->end_result = rctx->num_spill_draw_calls;
274 break;
275 case R600_QUERY_COMPUTE_CALLS:
276 query->end_result = rctx->num_compute_calls;
277 break;
278 case R600_QUERY_SPILL_COMPUTE_CALLS:
279 query->end_result = rctx->num_spill_compute_calls;
280 break;
281 case R600_QUERY_DMA_CALLS:
282 query->end_result = rctx->num_dma_calls;
283 break;
284 case R600_QUERY_CP_DMA_CALLS:
285 query->end_result = rctx->num_cp_dma_calls;
286 break;
287 case R600_QUERY_NUM_VS_FLUSHES:
288 query->end_result = rctx->num_vs_flushes;
289 break;
290 case R600_QUERY_NUM_PS_FLUSHES:
291 query->end_result = rctx->num_ps_flushes;
292 break;
293 case R600_QUERY_NUM_CS_FLUSHES:
294 query->end_result = rctx->num_cs_flushes;
295 break;
296 case R600_QUERY_NUM_CB_CACHE_FLUSHES:
297 query->end_result = rctx->num_cb_cache_flushes;
298 break;
299 case R600_QUERY_NUM_DB_CACHE_FLUSHES:
300 query->end_result = rctx->num_db_cache_flushes;
301 break;
302 case R600_QUERY_NUM_L2_INVALIDATES:
303 query->end_result = rctx->num_L2_invalidates;
304 break;
305 case R600_QUERY_NUM_L2_WRITEBACKS:
306 query->end_result = rctx->num_L2_writebacks;
307 break;
308 case R600_QUERY_NUM_RESIDENT_HANDLES:
309 query->end_result = rctx->num_resident_handles;
310 break;
311 case R600_QUERY_TC_OFFLOADED_SLOTS:
312 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
313 break;
314 case R600_QUERY_TC_DIRECT_SLOTS:
315 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
316 break;
317 case R600_QUERY_TC_NUM_SYNCS:
318 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
319 break;
320 case R600_QUERY_REQUESTED_VRAM:
321 case R600_QUERY_REQUESTED_GTT:
322 case R600_QUERY_MAPPED_VRAM:
323 case R600_QUERY_MAPPED_GTT:
324 case R600_QUERY_VRAM_USAGE:
325 case R600_QUERY_VRAM_VIS_USAGE:
326 case R600_QUERY_GTT_USAGE:
327 case R600_QUERY_GPU_TEMPERATURE:
328 case R600_QUERY_CURRENT_GPU_SCLK:
329 case R600_QUERY_CURRENT_GPU_MCLK:
330 case R600_QUERY_BUFFER_WAIT_TIME:
331 case R600_QUERY_NUM_MAPPED_BUFFERS:
332 case R600_QUERY_NUM_GFX_IBS:
333 case R600_QUERY_NUM_SDMA_IBS:
334 case R600_QUERY_NUM_BYTES_MOVED:
335 case R600_QUERY_NUM_EVICTIONS:
336 case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
337 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
338 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
339 break;
340 }
341 case R600_QUERY_GFX_BO_LIST_SIZE:
342 ws_id = winsys_id_from_type(query->b.type);
343 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
344 query->end_time = rctx->ws->query_value(rctx->ws,
345 RADEON_NUM_GFX_IBS);
346 break;
347 case R600_QUERY_CS_THREAD_BUSY:
348 ws_id = winsys_id_from_type(query->b.type);
349 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
350 query->end_time = os_time_get_nano();
351 break;
352 case R600_QUERY_GALLIUM_THREAD_BUSY:
353 query->end_result =
354 rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
355 query->end_time = os_time_get_nano();
356 break;
357 case R600_QUERY_GPU_LOAD:
358 case R600_QUERY_GPU_SHADERS_BUSY:
359 case R600_QUERY_GPU_TA_BUSY:
360 case R600_QUERY_GPU_GDS_BUSY:
361 case R600_QUERY_GPU_VGT_BUSY:
362 case R600_QUERY_GPU_IA_BUSY:
363 case R600_QUERY_GPU_SX_BUSY:
364 case R600_QUERY_GPU_WD_BUSY:
365 case R600_QUERY_GPU_BCI_BUSY:
366 case R600_QUERY_GPU_SC_BUSY:
367 case R600_QUERY_GPU_PA_BUSY:
368 case R600_QUERY_GPU_DB_BUSY:
369 case R600_QUERY_GPU_CP_BUSY:
370 case R600_QUERY_GPU_CB_BUSY:
371 case R600_QUERY_GPU_SDMA_BUSY:
372 case R600_QUERY_GPU_PFP_BUSY:
373 case R600_QUERY_GPU_MEQ_BUSY:
374 case R600_QUERY_GPU_ME_BUSY:
375 case R600_QUERY_GPU_SURF_SYNC_BUSY:
376 case R600_QUERY_GPU_CP_DMA_BUSY:
377 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
378 query->end_result = r600_end_counter(rctx->screen,
379 query->b.type,
380 query->begin_result);
381 query->begin_result = 0;
382 break;
383 case R600_QUERY_NUM_COMPILATIONS:
384 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
385 break;
386 case R600_QUERY_NUM_SHADERS_CREATED:
387 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
388 break;
389 case R600_QUERY_NUM_SHADER_CACHE_HITS:
390 query->end_result =
391 p_atomic_read(&rctx->screen->num_shader_cache_hits);
392 break;
393 case R600_QUERY_GPIN_ASIC_ID:
394 case R600_QUERY_GPIN_NUM_SIMD:
395 case R600_QUERY_GPIN_NUM_RB:
396 case R600_QUERY_GPIN_NUM_SPI:
397 case R600_QUERY_GPIN_NUM_SE:
398 break;
399 default:
400 unreachable("r600_query_sw_end: bad query type");
401 }
402
403 return true;
404 }
405
406 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
407 struct r600_query *rquery,
408 bool wait,
409 union pipe_query_result *result)
410 {
411 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
412
413 switch (query->b.type) {
414 case PIPE_QUERY_TIMESTAMP_DISJOINT:
415 /* clock_crystal_freq is in kHz (cycles per millisecond); convert to Hz. */
416 result->timestamp_disjoint.frequency =
417 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
418 result->timestamp_disjoint.disjoint = false;
419 return true;
420 case PIPE_QUERY_GPU_FINISHED: {
421 struct pipe_screen *screen = rctx->b.screen;
422 struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
423
424 result->b = screen->fence_finish(screen, ctx, query->fence,
425 wait ? PIPE_TIMEOUT_INFINITE : 0);
426 return result->b;
427 }
428
429 case R600_QUERY_GFX_BO_LIST_SIZE:
430 result->u64 = (query->end_result - query->begin_result) /
431 (query->end_time - query->begin_time);
432 return true;
433 case R600_QUERY_CS_THREAD_BUSY:
434 case R600_QUERY_GALLIUM_THREAD_BUSY:
435 result->u64 = (query->end_result - query->begin_result) * 100 /
436 (query->end_time - query->begin_time);
437 return true;
438 case R600_QUERY_GPIN_ASIC_ID:
439 result->u32 = 0;
440 return true;
441 case R600_QUERY_GPIN_NUM_SIMD:
442 result->u32 = rctx->screen->info.num_good_compute_units;
443 return true;
444 case R600_QUERY_GPIN_NUM_RB:
445 result->u32 = rctx->screen->info.num_render_backends;
446 return true;
447 case R600_QUERY_GPIN_NUM_SPI:
448 result->u32 = 1; /* all supported chips have one SPI per SE */
449 return true;
450 case R600_QUERY_GPIN_NUM_SE:
451 result->u32 = rctx->screen->info.max_se;
452 return true;
453 }
454
455 result->u64 = query->end_result - query->begin_result;
456
457 switch (query->b.type) {
458 case R600_QUERY_BUFFER_WAIT_TIME:
459 case R600_QUERY_GPU_TEMPERATURE:
460 result->u64 /= 1000;
461 break;
462 case R600_QUERY_CURRENT_GPU_SCLK:
463 case R600_QUERY_CURRENT_GPU_MCLK:
464 result->u64 *= 1000000;
465 break;
466 }
467
468 return true;
469 }
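/* A minimal standalone sketch of the rate math used by the *_THREAD_BUSY
 * queries above; the helper name is illustrative, not driver API.
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>

static uint64_t busy_percent(uint64_t begin_result, uint64_t end_result,
                             uint64_t begin_time, uint64_t end_time)
{
	/* Counter delta (thread time in ns) over wall-clock delta (ns),
	 * scaled to a percentage, as in r600_query_sw_get_result. */
	return (end_result - begin_result) * 100 /
	       (end_time - begin_time);
}
#endif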
470
471
472 static struct r600_query_ops sw_query_ops = {
473 .destroy = r600_query_sw_destroy,
474 .begin = r600_query_sw_begin,
475 .end = r600_query_sw_end,
476 .get_result = r600_query_sw_get_result,
477 .get_result_resource = NULL
478 };
479
480 static struct pipe_query *r600_query_sw_create(unsigned query_type)
481 {
482 struct r600_query_sw *query;
483
484 query = CALLOC_STRUCT(r600_query_sw);
485 if (!query)
486 return NULL;
487
488 query->b.type = query_type;
489 query->b.ops = &sw_query_ops;
490
491 return (struct pipe_query *)query;
492 }
493
494 void r600_query_hw_destroy(struct r600_common_screen *rscreen,
495 struct r600_query *rquery)
496 {
497 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
498 struct r600_query_buffer *prev = query->buffer.previous;
499
500 /* Release all query buffers. */
501 while (prev) {
502 struct r600_query_buffer *qbuf = prev;
503 prev = prev->previous;
504 r600_resource_reference(&qbuf->buf, NULL);
505 FREE(qbuf);
506 }
507
508 r600_resource_reference(&query->buffer.buf, NULL);
509 FREE(rquery);
510 }
511
512 static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
513 struct r600_query_hw *query)
514 {
515 unsigned buf_size = MAX2(query->result_size,
516 rscreen->info.min_alloc_size);
517
518 /* Queries are normally read by the CPU after
519 * being written by the GPU, so a staging buffer
520 * is a good usage pattern here.
521 */
522 struct r600_resource *buf = (struct r600_resource*)
523 pipe_buffer_create(&rscreen->b, 0,
524 PIPE_USAGE_STAGING, buf_size);
525 if (!buf)
526 return NULL;
527
528 if (!query->ops->prepare_buffer(rscreen, query, buf)) {
529 r600_resource_reference(&buf, NULL);
530 return NULL;
531 }
532
533 return buf;
534 }
535
536 static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
537 struct r600_query_hw *query,
538 struct r600_resource *buffer)
539 {
540 /* Callers ensure that the buffer is currently unused by the GPU. */
541 uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
542 PIPE_TRANSFER_WRITE |
543 PIPE_TRANSFER_UNSYNCHRONIZED);
544 if (!results)
545 return false;
546
547 memset(results, 0, buffer->b.b.width0);
548
549 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
550 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
551 unsigned max_rbs = rscreen->info.num_render_backends;
552 unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
553 unsigned num_results;
554 unsigned i, j;
555
556 /* Pre-set the availability bit (bit 31 of each high dword) for unused backends. */
557 num_results = buffer->b.b.width0 / query->result_size;
558 for (j = 0; j < num_results; j++) {
559 for (i = 0; i < max_rbs; i++) {
560 if (!(enabled_rb_mask & (1<<i))) {
561 results[(i * 4)+1] = 0x80000000;
562 results[(i * 4)+3] = 0x80000000;
563 }
564 }
565 results += 4 * max_rbs;
566 }
567 }
568
569 return true;
570 }
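/* A standalone sketch of the occlusion result layout prepared above: each
 * render backend owns four dwords {begin_lo, begin_hi, end_lo, end_hi}, and
 * bit 31 of the high dwords is the "value written" flag, so pre-setting it
 * makes disabled RBs look permanently available. Names are illustrative.
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>

static void mark_disabled_rbs(uint32_t *results, unsigned max_rbs,
                              unsigned enabled_rb_mask)
{
	for (unsigned i = 0; i < max_rbs; i++) {
		if (!(enabled_rb_mask & (1u << i))) {
			results[i * 4 + 1] = 0x80000000; /* begin_hi */
			results[i * 4 + 3] = 0x80000000; /* end_hi */
		}
	}
}
#endif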
571
572 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
573 struct r600_query *rquery,
574 bool wait,
575 enum pipe_query_value_type result_type,
576 int index,
577 struct pipe_resource *resource,
578 unsigned offset);
579
580 static struct r600_query_ops query_hw_ops = {
581 .destroy = r600_query_hw_destroy,
582 .begin = r600_query_hw_begin,
583 .end = r600_query_hw_end,
584 .get_result = r600_query_hw_get_result,
585 .get_result_resource = r600_query_hw_get_result_resource,
586 };
587
588 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
589 struct r600_query_hw *query,
590 struct r600_resource *buffer,
591 uint64_t va);
592 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
593 struct r600_query_hw *query,
594 struct r600_resource *buffer,
595 uint64_t va);
596 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
597 struct r600_query_hw *, void *buffer,
598 union pipe_query_result *result);
599 static void r600_query_hw_clear_result(struct r600_query_hw *,
600 union pipe_query_result *);
601
602 static struct r600_query_hw_ops query_hw_default_hw_ops = {
603 .prepare_buffer = r600_query_hw_prepare_buffer,
604 .emit_start = r600_query_hw_do_emit_start,
605 .emit_stop = r600_query_hw_do_emit_stop,
606 .clear_result = r600_query_hw_clear_result,
607 .add_result = r600_query_hw_add_result,
608 };
609
610 bool r600_query_hw_init(struct r600_common_screen *rscreen,
611 struct r600_query_hw *query)
612 {
613 query->buffer.buf = r600_new_query_buffer(rscreen, query);
614 if (!query->buffer.buf)
615 return false;
616
617 return true;
618 }
619
620 static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
621 unsigned query_type,
622 unsigned index)
623 {
624 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
625 if (!query)
626 return NULL;
627
628 query->b.type = query_type;
629 query->b.ops = &query_hw_ops;
630 query->ops = &query_hw_default_hw_ops;
631
632 switch (query_type) {
633 case PIPE_QUERY_OCCLUSION_COUNTER:
634 case PIPE_QUERY_OCCLUSION_PREDICATE:
635 query->result_size = 16 * rscreen->info.num_render_backends;
636 query->result_size += 16; /* for the fence + alignment */
637 query->num_cs_dw_begin = 6;
638 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
639 break;
640 case PIPE_QUERY_TIME_ELAPSED:
641 query->result_size = 24;
642 query->num_cs_dw_begin = 8;
643 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
644 break;
645 case PIPE_QUERY_TIMESTAMP:
646 query->result_size = 16;
647 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
648 query->flags = R600_QUERY_HW_FLAG_NO_START;
649 break;
650 case PIPE_QUERY_PRIMITIVES_EMITTED:
651 case PIPE_QUERY_PRIMITIVES_GENERATED:
652 case PIPE_QUERY_SO_STATISTICS:
653 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
654 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
655 query->result_size = 32;
656 query->num_cs_dw_begin = 6;
657 query->num_cs_dw_end = 6;
658 query->stream = index;
659 break;
660 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
661 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
662 query->result_size = 32 * R600_MAX_STREAMS;
663 query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
664 query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
665 break;
666 case PIPE_QUERY_PIPELINE_STATISTICS:
667 /* 11 values on EG, 8 on R600. */
668 query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
669 query->result_size += 8; /* for the fence + alignment */
670 query->num_cs_dw_begin = 6;
671 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
672 break;
673 default:
674 assert(0);
675 FREE(query);
676 return NULL;
677 }
678
679 if (!r600_query_hw_init(rscreen, query)) {
680 FREE(query);
681 return NULL;
682 }
683
684 return (struct pipe_query *)query;
685 }
686
687 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
688 unsigned type, int diff)
689 {
690 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
691 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
692 bool old_enable = rctx->num_occlusion_queries != 0;
693 bool old_perfect_enable =
694 rctx->num_perfect_occlusion_queries != 0;
695 bool enable, perfect_enable;
696
697 rctx->num_occlusion_queries += diff;
698 assert(rctx->num_occlusion_queries >= 0);
699
700 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
701 rctx->num_perfect_occlusion_queries += diff;
702 assert(rctx->num_perfect_occlusion_queries >= 0);
703 }
704
705 enable = rctx->num_occlusion_queries != 0;
706 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
707
708 if (enable != old_enable || perfect_enable != old_perfect_enable) {
709 struct r600_context *ctx = (struct r600_context*)rctx;
710 r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
711 }
712 }
713 }
714
715 static unsigned event_type_for_stream(unsigned stream)
716 {
717 switch (stream) {
718 default:
719 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
720 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
721 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
722 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
723 }
724 }
725
726 static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
727 unsigned stream)
728 {
729 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
730 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
731 radeon_emit(cs, va);
732 radeon_emit(cs, va >> 32);
733 }
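/* A sketch of the PM4 type-3 packet emitted above, using the common PM4
 * header encoding (packet type in bits 31:30, dword count in 29:16, opcode
 * in 15:8); the field positions here are assumptions for illustration, not
 * the authoritative r600d.h definitions.
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>

static uint32_t pm4_type3_header(unsigned opcode, unsigned count)
{
	return (3u << 30) | ((count & 0x3fff) << 16) | ((opcode & 0xff) << 8);
}

/* emit_sample_streamout() then sends four dwords:
 *   pm4_type3_header(PKT3_EVENT_WRITE, 2)
 *   event_type | (3 << 8)     -- EVENT_INDEX(3) selects streamout stats
 *   (uint32_t)va              -- destination address, low 32 bits
 *   (uint32_t)(va >> 32)      -- destination address, high 32 bits
 */
#endif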
734
735 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
736 struct r600_query_hw *query,
737 struct r600_resource *buffer,
738 uint64_t va)
739 {
740 struct radeon_winsys_cs *cs = ctx->gfx.cs;
741
742 switch (query->b.type) {
743 case PIPE_QUERY_OCCLUSION_COUNTER:
744 case PIPE_QUERY_OCCLUSION_PREDICATE:
745 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
746 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
747 radeon_emit(cs, va);
748 radeon_emit(cs, va >> 32);
749 break;
750 case PIPE_QUERY_PRIMITIVES_EMITTED:
751 case PIPE_QUERY_PRIMITIVES_GENERATED:
752 case PIPE_QUERY_SO_STATISTICS:
753 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
754 emit_sample_streamout(cs, va, query->stream);
755 break;
756 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
757 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
758 emit_sample_streamout(cs, va + 32 * stream, stream);
759 break;
760 case PIPE_QUERY_TIME_ELAPSED:
761 /* Write the timestamp after the last draw is done.
762 * (bottom-of-pipe)
763 */
764 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
765 0, EOP_DATA_SEL_TIMESTAMP,
766 NULL, va, 0, query->b.type);
767 break;
768 case PIPE_QUERY_PIPELINE_STATISTICS:
769 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
770 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
771 radeon_emit(cs, va);
772 radeon_emit(cs, va >> 32);
773 break;
774 default:
775 assert(0);
776 }
777 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
778 RADEON_PRIO_QUERY);
779 }
780
781 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
782 struct r600_query_hw *query)
783 {
784 uint64_t va;
785
786 if (!query->buffer.buf)
787 return; /* previous buffer allocation failure */
788
789 r600_update_occlusion_query_state(ctx, query->b.type, 1);
790 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
791
792 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
793 true);
794
795 /* Get a new query buffer if needed. */
796 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
797 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
798 *qbuf = query->buffer;
799 query->buffer.results_end = 0;
800 query->buffer.previous = qbuf;
801 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
802 if (!query->buffer.buf)
803 return;
804 }
805
806 /* emit begin query */
807 va = query->buffer.buf->gpu_address + query->buffer.results_end;
808
809 query->ops->emit_start(ctx, query, query->buffer.buf, va);
810
811 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
812 }
813
814 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
815 struct r600_query_hw *query,
816 struct r600_resource *buffer,
817 uint64_t va)
818 {
819 struct radeon_winsys_cs *cs = ctx->gfx.cs;
820 uint64_t fence_va = 0;
821
822 switch (query->b.type) {
823 case PIPE_QUERY_OCCLUSION_COUNTER:
824 case PIPE_QUERY_OCCLUSION_PREDICATE:
825 va += 8;
826 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
827 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
828 radeon_emit(cs, va);
829 radeon_emit(cs, va >> 32);
830
831 fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
832 break;
833 case PIPE_QUERY_PRIMITIVES_EMITTED:
834 case PIPE_QUERY_PRIMITIVES_GENERATED:
835 case PIPE_QUERY_SO_STATISTICS:
836 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
837 va += 16;
838 emit_sample_streamout(cs, va, query->stream);
839 break;
840 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
841 va += 16;
842 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
843 emit_sample_streamout(cs, va + 32 * stream, stream);
844 break;
845 case PIPE_QUERY_TIME_ELAPSED:
846 va += 8;
847 /* fall through */
848 case PIPE_QUERY_TIMESTAMP:
849 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
850 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
851 0, query->b.type);
852 fence_va = va + 8;
853 break;
854 case PIPE_QUERY_PIPELINE_STATISTICS: {
855 unsigned sample_size = (query->result_size - 8) / 2;
856
857 va += sample_size;
858 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
859 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
860 radeon_emit(cs, va);
861 radeon_emit(cs, va >> 32);
862
863 fence_va = va + sample_size;
864 break;
865 }
866 default:
867 assert(0);
868 }
869 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
870 RADEON_PRIO_QUERY);
871
872 if (fence_va)
873 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
874 EOP_DATA_SEL_VALUE_32BIT,
875 query->buffer.buf, fence_va, 0x80000000,
876 query->b.type);
877 }
878
879 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
880 struct r600_query_hw *query)
881 {
882 uint64_t va;
883
884 if (!query->buffer.buf)
885 return; /* previous buffer allocation failure */
886
887 /* Queries that need a begin call already reserved CS space in begin_query. */
888 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
889 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
890 }
891
892 /* emit end query */
893 va = query->buffer.buf->gpu_address + query->buffer.results_end;
894
895 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
896
897 query->buffer.results_end += query->result_size;
898
899 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
900 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
901
902 r600_update_occlusion_query_state(ctx, query->b.type, -1);
903 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
904 }
905
906 static void emit_set_predicate(struct r600_common_context *ctx,
907 struct r600_resource *buf, uint64_t va,
908 uint32_t op)
909 {
910 struct radeon_winsys_cs *cs = ctx->gfx.cs;
911
912 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
913 radeon_emit(cs, va);
914 radeon_emit(cs, op | ((va >> 32) & 0xFF));
915 r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
916 RADEON_PRIO_QUERY);
917 }
918
919 static void r600_emit_query_predication(struct r600_common_context *ctx,
920 struct r600_atom *atom)
921 {
922 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
923 struct r600_query_buffer *qbuf;
924 uint32_t op;
925 bool flag_wait, invert;
926
927 if (!query)
928 return;
929
930 invert = ctx->render_cond_invert;
931 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
932 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
933
934 switch (query->b.type) {
935 case PIPE_QUERY_OCCLUSION_COUNTER:
936 case PIPE_QUERY_OCCLUSION_PREDICATE:
937 op = PRED_OP(PREDICATION_OP_ZPASS);
938 break;
939 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
940 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
941 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
942 invert = !invert;
943 break;
944 default:
945 assert(0);
946 return;
947 }
948
949 /* If requested, invert the predicate; see GL_ARB_conditional_render_inverted. */
950 if (invert)
951 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
952 else
953 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */
954
955 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
956
957 /* emit predicate packets for all data blocks */
958 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
959 unsigned results_base = 0;
960 uint64_t va_base = qbuf->buf->gpu_address;
961
962 while (results_base < qbuf->results_end) {
963 uint64_t va = va_base + results_base;
964
965 if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
966 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
967 emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
968
969 /* set CONTINUE bit for all packets except the first */
970 op |= PREDICATION_CONTINUE;
971 }
972 } else {
973 emit_set_predicate(ctx, qbuf->buf, va, op);
974 op |= PREDICATION_CONTINUE;
975 }
976
977 results_base += query->result_size;
978 }
979 }
980 }
981
982 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
983 {
984 struct r600_common_screen *rscreen =
985 (struct r600_common_screen *)ctx->screen;
986
987 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
988 query_type == PIPE_QUERY_GPU_FINISHED ||
989 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
990 return r600_query_sw_create(query_type);
991
992 return r600_query_hw_create(rscreen, query_type, index);
993 }
994
995 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
996 {
997 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
998 struct r600_query *rquery = (struct r600_query *)query;
999
1000 rquery->ops->destroy(rctx->screen, rquery);
1001 }
1002
1003 static boolean r600_begin_query(struct pipe_context *ctx,
1004 struct pipe_query *query)
1005 {
1006 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1007 struct r600_query *rquery = (struct r600_query *)query;
1008
1009 return rquery->ops->begin(rctx, rquery);
1010 }
1011
1012 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
1013 struct r600_query_hw *query)
1014 {
1015 struct r600_query_buffer *prev = query->buffer.previous;
1016
1017 /* Discard the old query buffers. */
1018 while (prev) {
1019 struct r600_query_buffer *qbuf = prev;
1020 prev = prev->previous;
1021 r600_resource_reference(&qbuf->buf, NULL);
1022 FREE(qbuf);
1023 }
1024
1025 query->buffer.results_end = 0;
1026 query->buffer.previous = NULL;
1027
1028 /* Obtain a new buffer if the current one can't be mapped without a stall. */
1029 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1030 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1031 r600_resource_reference(&query->buffer.buf, NULL);
1032 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
1033 } else {
1034 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
1035 r600_resource_reference(&query->buffer.buf, NULL);
1036 }
1037 }
1038
1039 bool r600_query_hw_begin(struct r600_common_context *rctx,
1040 struct r600_query *rquery)
1041 {
1042 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1043
1044 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
1045 assert(0);
1046 return false;
1047 }
1048
1049 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
1050 r600_query_hw_reset_buffers(rctx, query);
1051
1052 r600_query_hw_emit_start(rctx, query);
1053 if (!query->buffer.buf)
1054 return false;
1055
1056 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1057 return true;
1058 }
1059
1060 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1061 {
1062 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1063 struct r600_query *rquery = (struct r600_query *)query;
1064
1065 return rquery->ops->end(rctx, rquery);
1066 }
1067
1068 bool r600_query_hw_end(struct r600_common_context *rctx,
1069 struct r600_query *rquery)
1070 {
1071 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1072
1073 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1074 r600_query_hw_reset_buffers(rctx, query);
1075
1076 r600_query_hw_emit_stop(rctx, query);
1077
1078 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1079 LIST_DELINIT(&query->list);
1080
1081 if (!query->buffer.buf)
1082 return false;
1083
1084 return true;
1085 }
1086
1087 static void r600_get_hw_query_params(struct r600_common_context *rctx,
1088 struct r600_query_hw *rquery, int index,
1089 struct r600_hw_query_params *params)
1090 {
1091 unsigned max_rbs = rctx->screen->info.num_render_backends;
1092
1093 params->pair_stride = 0;
1094 params->pair_count = 1;
1095
1096 switch (rquery->b.type) {
1097 case PIPE_QUERY_OCCLUSION_COUNTER:
1098 case PIPE_QUERY_OCCLUSION_PREDICATE:
1099 params->start_offset = 0;
1100 params->end_offset = 8;
1101 params->fence_offset = max_rbs * 16;
1102 params->pair_stride = 16;
1103 params->pair_count = max_rbs;
1104 break;
1105 case PIPE_QUERY_TIME_ELAPSED:
1106 params->start_offset = 0;
1107 params->end_offset = 8;
1108 params->fence_offset = 16;
1109 break;
1110 case PIPE_QUERY_TIMESTAMP:
1111 params->start_offset = 0;
1112 params->end_offset = 0;
1113 params->fence_offset = 8;
1114 break;
1115 case PIPE_QUERY_PRIMITIVES_EMITTED:
1116 params->start_offset = 8;
1117 params->end_offset = 24;
1118 params->fence_offset = params->end_offset + 4;
1119 break;
1120 case PIPE_QUERY_PRIMITIVES_GENERATED:
1121 params->start_offset = 0;
1122 params->end_offset = 16;
1123 params->fence_offset = params->end_offset + 4;
1124 break;
1125 case PIPE_QUERY_SO_STATISTICS:
1126 params->start_offset = 8 - index * 8;
1127 params->end_offset = 24 - index * 8;
1128 params->fence_offset = params->end_offset + 4;
1129 break;
1130 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1131 params->pair_count = R600_MAX_STREAMS;
1132 params->pair_stride = 32; /* fall through */
1133 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1134 params->start_offset = 0;
1135 params->end_offset = 16;
1136
1137 /* We can re-use the high dword of the last 64-bit value as a
1138 * fence: it is initialized as 0, and the high bit is set by
1139 * the write of the streamout stats event.
1140 */
1141 params->fence_offset = rquery->result_size - 4;
1142 break;
1143 case PIPE_QUERY_PIPELINE_STATISTICS:
1144 {
1145 /* Offsets apply to EG+ */
1146 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1147 params->start_offset = offsets[index];
1148 params->end_offset = 88 + offsets[index];
1149 params->fence_offset = 2 * 88;
1150 break;
1151 }
1152 default:
1153 unreachable("r600_get_hw_query_params unsupported");
1154 }
1155 }
1156
1157 static uint64_t r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
1158 bool test_status_bit)
1159 {
1160 uint32_t *current_result = (uint32_t*)map;
1161 uint64_t start, end;
1162
1163 start = (uint64_t)current_result[start_index] |
1164 (uint64_t)current_result[start_index+1] << 32;
1165 end = (uint64_t)current_result[end_index] |
1166 (uint64_t)current_result[end_index+1] << 32;
1167
1168 if (!test_status_bit ||
1169 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1170 return end - start;
1171 }
1172 return 0;
1173 }
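/* How one 64-bit counter is reassembled above: the GPU writes lo/hi dword
 * pairs, and bit 63 (bit 31 of the high dword) is the "value was written"
 * flag. Subtracting start from end cancels the flag when both are set, so
 * the delta comes out clean. A standalone sketch with illustrative names:
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>
#include <stdbool.h>

static bool counter_available(const uint32_t *map, unsigned dw_index)
{
	uint64_t v = (uint64_t)map[dw_index] |
	             (uint64_t)map[dw_index + 1] << 32;
	return v >> 63; /* bit 31 of the high dword */
}
#endif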
1174
1175 static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
1176 struct r600_query_hw *query,
1177 void *buffer,
1178 union pipe_query_result *result)
1179 {
1180 unsigned max_rbs = rscreen->info.num_render_backends;
1181
1182 switch (query->b.type) {
1183 case PIPE_QUERY_OCCLUSION_COUNTER: {
1184 for (unsigned i = 0; i < max_rbs; ++i) {
1185 unsigned results_base = i * 16;
1186 result->u64 +=
1187 r600_query_read_result(buffer + results_base, 0, 2, true);
1188 }
1189 break;
1190 }
1191 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1192 for (unsigned i = 0; i < max_rbs; ++i) {
1193 unsigned results_base = i * 16;
1194 result->b = result->b ||
1195 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1196 }
1197 break;
1198 }
1199 case PIPE_QUERY_TIME_ELAPSED:
1200 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1201 break;
1202 case PIPE_QUERY_TIMESTAMP:
1203 result->u64 = *(uint64_t*)buffer;
1204 break;
1205 case PIPE_QUERY_PRIMITIVES_EMITTED:
1206 /* SAMPLE_STREAMOUTSTATS stores this structure:
1207 * {
1208 * u64 NumPrimitivesWritten;
1209 * u64 PrimitiveStorageNeeded;
1210 * }
1211 * We only need NumPrimitivesWritten here. */
1212 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1213 break;
1214 case PIPE_QUERY_PRIMITIVES_GENERATED:
1215 /* Here we read PrimitiveStorageNeeded. */
1216 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1217 break;
1218 case PIPE_QUERY_SO_STATISTICS:
1219 result->so_statistics.num_primitives_written +=
1220 r600_query_read_result(buffer, 2, 6, true);
1221 result->so_statistics.primitives_storage_needed +=
1222 r600_query_read_result(buffer, 0, 4, true);
1223 break;
1224 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1225 result->b = result->b ||
1226 r600_query_read_result(buffer, 2, 6, true) !=
1227 r600_query_read_result(buffer, 0, 4, true);
1228 break;
1229 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1230 for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
1231 result->b = result->b ||
1232 r600_query_read_result(buffer, 2, 6, true) !=
1233 r600_query_read_result(buffer, 0, 4, true);
1234 buffer = (char *)buffer + 32;
1235 }
1236 break;
1237 case PIPE_QUERY_PIPELINE_STATISTICS:
1238 if (rscreen->chip_class >= EVERGREEN) {
1239 result->pipeline_statistics.ps_invocations +=
1240 r600_query_read_result(buffer, 0, 22, false);
1241 result->pipeline_statistics.c_primitives +=
1242 r600_query_read_result(buffer, 2, 24, false);
1243 result->pipeline_statistics.c_invocations +=
1244 r600_query_read_result(buffer, 4, 26, false);
1245 result->pipeline_statistics.vs_invocations +=
1246 r600_query_read_result(buffer, 6, 28, false);
1247 result->pipeline_statistics.gs_invocations +=
1248 r600_query_read_result(buffer, 8, 30, false);
1249 result->pipeline_statistics.gs_primitives +=
1250 r600_query_read_result(buffer, 10, 32, false);
1251 result->pipeline_statistics.ia_primitives +=
1252 r600_query_read_result(buffer, 12, 34, false);
1253 result->pipeline_statistics.ia_vertices +=
1254 r600_query_read_result(buffer, 14, 36, false);
1255 result->pipeline_statistics.hs_invocations +=
1256 r600_query_read_result(buffer, 16, 38, false);
1257 result->pipeline_statistics.ds_invocations +=
1258 r600_query_read_result(buffer, 18, 40, false);
1259 result->pipeline_statistics.cs_invocations +=
1260 r600_query_read_result(buffer, 20, 42, false);
1261 } else {
1262 result->pipeline_statistics.ps_invocations +=
1263 r600_query_read_result(buffer, 0, 16, false);
1264 result->pipeline_statistics.c_primitives +=
1265 r600_query_read_result(buffer, 2, 18, false);
1266 result->pipeline_statistics.c_invocations +=
1267 r600_query_read_result(buffer, 4, 20, false);
1268 result->pipeline_statistics.vs_invocations +=
1269 r600_query_read_result(buffer, 6, 22, false);
1270 result->pipeline_statistics.gs_invocations +=
1271 r600_query_read_result(buffer, 8, 24, false);
1272 result->pipeline_statistics.gs_primitives +=
1273 r600_query_read_result(buffer, 10, 26, false);
1274 result->pipeline_statistics.ia_primitives +=
1275 r600_query_read_result(buffer, 12, 28, false);
1276 result->pipeline_statistics.ia_vertices +=
1277 r600_query_read_result(buffer, 14, 30, false);
1278 }
1279 #if 0 /* for testing */
1280 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1281 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1282 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1283 result->pipeline_statistics.ia_vertices,
1284 result->pipeline_statistics.ia_primitives,
1285 result->pipeline_statistics.vs_invocations,
1286 result->pipeline_statistics.hs_invocations,
1287 result->pipeline_statistics.ds_invocations,
1288 result->pipeline_statistics.gs_invocations,
1289 result->pipeline_statistics.gs_primitives,
1290 result->pipeline_statistics.c_invocations,
1291 result->pipeline_statistics.c_primitives,
1292 result->pipeline_statistics.ps_invocations,
1293 result->pipeline_statistics.cs_invocations);
1294 #endif
1295 break;
1296 default:
1297 assert(0);
1298 }
1299 }
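/* Memory layout of one streamout stats sample, inferred from the read
 * offsets above (dwords 0/4 = storage needed, dwords 2/6 = primitives
 * written); a standalone sketch, not a hardware-documentation struct:
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>

struct streamout_sample {
	uint64_t primitive_storage_needed; /* "primitives generated" */
	uint64_t num_primitives_written;
};

/* The buffer holds {begin, end} samples; the overflow predicate is
 * (end.written - begin.written) != (end.needed - begin.needed),
 * i.e. more primitives were generated than could be streamed out. */
#endif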
1300
1301 static boolean r600_get_query_result(struct pipe_context *ctx,
1302 struct pipe_query *query, boolean wait,
1303 union pipe_query_result *result)
1304 {
1305 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1306 struct r600_query *rquery = (struct r600_query *)query;
1307
1308 return rquery->ops->get_result(rctx, rquery, wait, result);
1309 }
1310
1311 static void r600_get_query_result_resource(struct pipe_context *ctx,
1312 struct pipe_query *query,
1313 boolean wait,
1314 enum pipe_query_value_type result_type,
1315 int index,
1316 struct pipe_resource *resource,
1317 unsigned offset)
1318 {
1319 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1320 struct r600_query *rquery = (struct r600_query *)query;
1321
1322 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1323 resource, offset);
1324 }
1325
1326 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1327 union pipe_query_result *result)
1328 {
1329 util_query_clear_result(result, query->b.type);
1330 }
1331
1332 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1333 struct r600_query *rquery,
1334 bool wait, union pipe_query_result *result)
1335 {
1336 struct r600_common_screen *rscreen = rctx->screen;
1337 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1338 struct r600_query_buffer *qbuf;
1339
1340 query->ops->clear_result(query, result);
1341
1342 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1343 unsigned usage = PIPE_TRANSFER_READ |
1344 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1345 unsigned results_base = 0;
1346 void *map;
1347
1348 if (rquery->b.flushed)
1349 map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1350 else
1351 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);
1352
1353 if (!map)
1354 return false;
1355
1356 while (results_base != qbuf->results_end) {
1357 query->ops->add_result(rscreen, query, map + results_base,
1358 result);
1359 results_base += query->result_size;
1360 }
1361 }
1362
1363 /* Convert GPU ticks to nanoseconds; clock_crystal_freq is in kHz. */
1364 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1365 rquery->type == PIPE_QUERY_TIMESTAMP) {
1366 result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
1367 }
1368 return true;
1369 }
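/* The tick-to-nanosecond conversion used above, as a standalone sketch:
 * clock_crystal_freq is reported in kHz, so ns = ticks * 1000000 / kHz.
 */
#if 0 /* illustrative sketch, not part of the driver */
#include <stdint.h>

static uint64_t ticks_to_ns(uint64_t ticks, uint64_t crystal_freq_khz)
{
	return ticks * 1000000 / crystal_freq_khz;
}
#endif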
1370
1371 /* Create the compute shader that is used to collect the results.
1372 *
1373 * One compute grid with a single thread is launched for every query result
1374 * buffer. The thread (optionally) reads a previous summary buffer, then
1375 * accumulates data from the query result buffer, and writes the result either
1376 * to a summary buffer to be consumed by the next grid invocation or to the
1377 * user-supplied buffer.
1378 *
1379 * Data layout:
1380 *
1381 * CONST
1382 * 0.x = end_offset
1383 * 0.y = result_stride
1384 * 0.z = result_count
1385 * 0.w = bit field:
1386 * 1: read previously accumulated values
1387 * 2: write accumulated values for chaining
1388 * 4: write result available
1389 * 8: convert result to boolean (0/1)
1390 * 16: only read one dword and use that as result
1391 * 32: apply timestamp conversion
1392 * 64: store full 64 bits result
1393 * 128: store signed 32 bits result
1394 * 256: SO_OVERFLOW mode: take the difference of two successive half-pairs
1395 * 1.x = fence_offset
1396 * 1.y = pair_stride
1397 * 1.z = pair_count
1398 *
1399 * BUFFER[0] = query result buffer
1400 * BUFFER[1] = previous summary buffer
1401 * BUFFER[2] = next summary buffer or user-supplied buffer
1402 */
1403 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1404 {
1405 /* TEMP[0].xy = accumulated result so far
1406 * TEMP[0].z = result not available
1407 *
1408 * TEMP[1].x = current result index
1409 * TEMP[1].y = current pair index
1410 */
1411 static const char text_tmpl[] =
1412 "COMP\n"
1413 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1414 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1415 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1416 "DCL BUFFER[0]\n"
1417 "DCL BUFFER[1]\n"
1418 "DCL BUFFER[2]\n"
1419 "DCL CONST[0][0..1]\n"
1420 "DCL TEMP[0..5]\n"
1421 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1422 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1423 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1424 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1425 "IMM[4] UINT32 {256, 0, 0, 0}\n"
1426
1427 "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
1428 "UIF TEMP[5]\n"
1429 /* Check result availability. */
1430 "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
1431 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1432 "MOV TEMP[1], TEMP[0].zzzz\n"
1433 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1434
1435 /* Load result if available. */
1436 "UIF TEMP[1]\n"
1437 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1438 "ENDIF\n"
1439 "ELSE\n"
1440 /* Load previously accumulated result if requested. */
1441 "MOV TEMP[0], IMM[0].xxxx\n"
1442 "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
1443 "UIF TEMP[4]\n"
1444 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1445 "ENDIF\n"
1446
1447 "MOV TEMP[1].x, IMM[0].xxxx\n"
1448 "BGNLOOP\n"
1449 /* Break if accumulated result so far is not available. */
1450 "UIF TEMP[0].zzzz\n"
1451 "BRK\n"
1452 "ENDIF\n"
1453
1454 /* Break if result_index >= result_count. */
1455 "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
1456 "UIF TEMP[5]\n"
1457 "BRK\n"
1458 "ENDIF\n"
1459
1460 /* Load fence and check result availability */
1461 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
1462 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1463 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1464 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1465 "UIF TEMP[0].zzzz\n"
1466 "BRK\n"
1467 "ENDIF\n"
1468
1469 "MOV TEMP[1].y, IMM[0].xxxx\n"
1470 "BGNLOOP\n"
1471 /* Load start and end. */
1472 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
1473 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
1474 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1475
1476 "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
1477 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
1478
1479 "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"
1480
1481 "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
1482 "UIF TEMP[5].zzzz\n"
1483 /* Load second start/end half-pair and
1484 * take the difference
1485 */
1486 "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
1487 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1488 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
1489
1490 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1491 "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
1492 "ENDIF\n"
1493
1494 "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"
1495
1496 /* Increment pair index */
1497 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1498 "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
1499 "UIF TEMP[5]\n"
1500 "BRK\n"
1501 "ENDIF\n"
1502 "ENDLOOP\n"
1503
1504 /* Increment result index */
1505 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1506 "ENDLOOP\n"
1507 "ENDIF\n"
1508
1509 "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
1510 "UIF TEMP[4]\n"
1511 /* Store accumulated data for chaining. */
1512 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1513 "ELSE\n"
1514 "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
1515 "UIF TEMP[4]\n"
1516 /* Store result availability. */
1517 "NOT TEMP[0].z, TEMP[0]\n"
1518 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1519 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1520
1521 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
1522 "UIF TEMP[4]\n"
1523 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1524 "ENDIF\n"
1525 "ELSE\n"
1526 /* Store result if it is available. */
1527 "NOT TEMP[4], TEMP[0].zzzz\n"
1528 "UIF TEMP[4]\n"
1529 /* Apply timestamp conversion */
1530 "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
1531 "UIF TEMP[4]\n"
1532 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1533 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1534 "ENDIF\n"
1535
1536 /* Convert to boolean */
1537 "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
1538 "UIF TEMP[4]\n"
1539 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
1540 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1541 "MOV TEMP[0].y, IMM[0].xxxx\n"
1542 "ENDIF\n"
1543
1544 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
1545 "UIF TEMP[4]\n"
1546 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1547 "ELSE\n"
1548 /* Clamping */
1549 "UIF TEMP[0].yyyy\n"
1550 "MOV TEMP[0].x, IMM[0].wwww\n"
1551 "ENDIF\n"
1552
1553 "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
1554 "UIF TEMP[4]\n"
1555 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1556 "ENDIF\n"
1557
1558 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1559 "ENDIF\n"
1560 "ENDIF\n"
1561 "ENDIF\n"
1562 "ENDIF\n"
1563
1564 "END\n";
1565
1566 char text[sizeof(text_tmpl) + 32];
1567 struct tgsi_token tokens[1024];
1568 struct pipe_compute_state state = {};
1569
1570 /* Hard code the frequency into the shader so that the backend can
1571 * use the full range of optimizations for divide-by-constant.
1572 */
1573 snprintf(text, sizeof(text), text_tmpl,
1574 rctx->screen->info.clock_crystal_freq);
1575
1576 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1577 assert(false);
1578 return;
1579 }
1580
1581 state.ir_type = PIPE_SHADER_IR_TGSI;
1582 state.prog = tokens;
1583
1584 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1585 }
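/* The CONST[0].w bit field documented above, spelled out as named flags;
 * this enum is illustrative only - the driver ORs the raw values into
 * consts.config in r600_query_hw_get_result_resource() below.
 */
#if 0 /* illustrative sketch, not part of the driver */
enum result_shader_config {
	QRS_READ_PREVIOUS   = 1u << 0, /* read previously accumulated values */
	QRS_WRITE_CHAIN     = 1u << 1, /* write accumulated values for chaining */
	QRS_WRITE_AVAILABLE = 1u << 2, /* write result availability */
	QRS_AS_BOOLEAN      = 1u << 3, /* convert result to boolean (0/1) */
	QRS_ONE_DWORD       = 1u << 4, /* only read one dword (timestamps) */
	QRS_TIMESTAMP       = 1u << 5, /* apply timestamp conversion */
	QRS_RESULT_64BIT    = 1u << 6, /* store full 64-bit result */
	QRS_RESULT_S32      = 1u << 7, /* store signed 32-bit result */
	QRS_SO_OVERFLOW     = 1u << 8  /* difference of two successive half-pairs */
};
#endif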
1586
1587 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1588 struct r600_qbo_state *st)
1589 {
1590 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1591
1592 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1593 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1594
1595 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1596 for (unsigned i = 0; i < 3; ++i)
1597 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1598 }
1599
1600 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1601 struct r600_query *rquery,
1602 bool wait,
1603 enum pipe_query_value_type result_type,
1604 int index,
1605 struct pipe_resource *resource,
1606 unsigned offset)
1607 {
1608 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1609 struct r600_query_buffer *qbuf;
1610 struct r600_query_buffer *qbuf_prev;
1611 struct pipe_resource *tmp_buffer = NULL;
1612 unsigned tmp_buffer_offset = 0;
1613 struct r600_qbo_state saved_state = {};
1614 struct pipe_grid_info grid = {};
1615 struct pipe_constant_buffer constant_buffer = {};
1616 struct pipe_shader_buffer ssbo[3];
1617 struct r600_hw_query_params params;
1618 struct {
1619 uint32_t end_offset;
1620 uint32_t result_stride;
1621 uint32_t result_count;
1622 uint32_t config;
1623 uint32_t fence_offset;
1624 uint32_t pair_stride;
1625 uint32_t pair_count;
1626 } consts;
1627
1628 if (!rctx->query_result_shader) {
1629 r600_create_query_result_shader(rctx);
1630 if (!rctx->query_result_shader)
1631 return;
1632 }
1633
1634 if (query->buffer.previous) {
1635 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1636 &tmp_buffer_offset, &tmp_buffer);
1637 if (!tmp_buffer)
1638 return;
1639 }
1640
1641 rctx->save_qbo_state(&rctx->b, &saved_state);
1642
1643 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1644 consts.end_offset = params.end_offset - params.start_offset;
1645 consts.fence_offset = params.fence_offset - params.start_offset;
1646 consts.result_stride = query->result_size;
1647 consts.pair_stride = params.pair_stride;
1648 consts.pair_count = params.pair_count;
1649
1650 constant_buffer.buffer_size = sizeof(consts);
1651 constant_buffer.user_buffer = &consts;
1652
1653 ssbo[1].buffer = tmp_buffer;
1654 ssbo[1].buffer_offset = tmp_buffer_offset;
1655 ssbo[1].buffer_size = 16;
1656
1657 ssbo[2] = ssbo[1];
1658
1659 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1660
1661 grid.block[0] = 1;
1662 grid.block[1] = 1;
1663 grid.block[2] = 1;
1664 grid.grid[0] = 1;
1665 grid.grid[1] = 1;
1666 grid.grid[2] = 1;
1667
1668 consts.config = 0;
1669 if (index < 0)
1670 consts.config |= 4;
1671 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE)
1672 consts.config |= 8;
1673 else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
1674 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1675 consts.config |= 8 | 256;
1676 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1677 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1678 consts.config |= 32;
1679
1680 switch (result_type) {
1681 case PIPE_QUERY_TYPE_U64:
1682 case PIPE_QUERY_TYPE_I64:
1683 consts.config |= 64;
1684 break;
1685 case PIPE_QUERY_TYPE_I32:
1686 consts.config |= 128;
1687 break;
1688 case PIPE_QUERY_TYPE_U32:
1689 break;
1690 }
1691
1692 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1693
1694 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1695 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1696 qbuf_prev = qbuf->previous;
1697 consts.result_count = qbuf->results_end / query->result_size;
1698 consts.config &= ~3;
1699 if (qbuf != &query->buffer)
1700 consts.config |= 1;
1701 if (qbuf->previous)
1702 consts.config |= 2;
1703 } else {
1704 /* Only read the last timestamp. */
1705 qbuf_prev = NULL;
1706 consts.result_count = 0;
1707 consts.config |= 16;
1708 params.start_offset += qbuf->results_end - query->result_size;
1709 }
1710
1711 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1712
1713 ssbo[0].buffer = &qbuf->buf->b.b;
1714 ssbo[0].buffer_offset = params.start_offset;
1715 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1716
1717 if (!qbuf->previous) {
1718 ssbo[2].buffer = resource;
1719 ssbo[2].buffer_offset = offset;
1720 ssbo[2].buffer_size = 8;
1721
1722 }
1723
1724 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1725
1726 if (wait && qbuf == &query->buffer) {
1727 uint64_t va;
1728
1729 /* Wait for result availability. Wait only for readiness
1730 * of the last entry, since the fence writes should be
1731 * serialized in the CP.
1732 */
1733 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1734 va += params.fence_offset;
1735
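/* The fence dword has bit 31 set once the result has been written,
 * so wait for (fence & 0x80000000) to become nonzero. */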
1736 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1737 }
1738
1739 rctx->b.launch_grid(&rctx->b, &grid);
1740 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1741 }
1742
1743 r600_restore_qbo_state(rctx, &saved_state);
1744 pipe_resource_reference(&tmp_buffer, NULL);
1745 }
1746
1747 static void r600_render_condition(struct pipe_context *ctx,
1748 struct pipe_query *query,
1749 boolean condition,
1750 enum pipe_render_cond_flag mode)
1751 {
1752 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1753 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1754 struct r600_query_buffer *qbuf;
1755 struct r600_atom *atom = &rctx->render_cond_atom;
1756
1757 /* Compute the size of SET_PREDICATION packets. */
1758 atom->num_dw = 0;
1759 if (query) {
1760 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1761 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1762
1763 if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1764 atom->num_dw *= R600_MAX_STREAMS;
1765 }
1766
1767 rctx->render_cond = query;
1768 rctx->render_cond_invert = condition;
1769 rctx->render_cond_mode = mode;
1770
1771 rctx->set_atom_dirty(rctx, atom, query != NULL);
1772 }
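
/* For reference, a minimal sketch of how a state tracker drives this hook
 * through the generic pipe_context interface (gallium API usage, not code
 * from this driver):
 *
 *    struct pipe_query *q =
 *       pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, 0);
 *    pipe->begin_query(pipe, q);
 *    ...draw the occluders...
 *    pipe->end_query(pipe, q);
 *    pipe->render_condition(pipe, q, FALSE, PIPE_RENDER_COND_WAIT);
 *    ...these draws are skipped when the predicate fails...
 *    pipe->render_condition(pipe, NULL, FALSE, 0);
 */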
1773
1774 void r600_suspend_queries(struct r600_common_context *ctx)
1775 {
1776 struct r600_query_hw *query;
1777
1778 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1779 r600_query_hw_emit_stop(ctx, query);
1780 }
1781 assert(ctx->num_cs_dw_queries_suspend == 0);
1782 }
1783
1784 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1785 struct list_head *query_list)
1786 {
1787 struct r600_query_hw *query;
1788 unsigned num_dw = 0;
1789
1790 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1791 /* begin + end */
1792 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1793
1794 /* Workaround for the fact that
1795 * num_cs_dw_queries_suspend is incremented for every
1796 * resumed query, which raises the bar in need_cs_space for
1797 * queries about to be resumed.
1798 */
1799 num_dw += query->num_cs_dw_end;
1800 }
1801 /* primitives generated query */
1802 num_dw += ctx->streamout.enable_atom.num_dw;
1803 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1804 num_dw += 13;
1805
1806 return num_dw;
1807 }
1808
1809 void r600_resume_queries(struct r600_common_context *ctx)
1810 {
1811 struct r600_query_hw *query;
1812 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1813
1814 assert(ctx->num_cs_dw_queries_suspend == 0);
1815
1816 /* Check CS space here. Resuming must not be interrupted by flushes. */
1817 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1818
1819 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1820 r600_query_hw_emit_start(ctx, query);
1821 }
1822 }
1823
1824 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1825 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1826 {
1827 struct r600_common_context *ctx =
1828 (struct r600_common_context*)rscreen->aux_context;
1829 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1830 struct r600_resource *buffer;
1831 uint32_t *results;
1832 unsigned i, mask = 0;
1833 unsigned max_rbs = ctx->screen->info.num_render_backends;
1834
1835 assert(rscreen->chip_class <= CAYMAN);
1836
1837 /* Use the backend map if the kernel reports a valid one. */
1838 if (rscreen->info.r600_gb_backend_map_valid) {
1839 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1840 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1841 unsigned item_width, item_mask;
1842
1843 if (ctx->chip_class >= EVERGREEN) {
1844 item_width = 4;
1845 item_mask = 0x7;
1846 } else {
1847 item_width = 2;
1848 item_mask = 0x3;
1849 }
1850
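/* backend_map packs one backend index per tile pipe: a 4-bit field
 * (mask 0x7) per pipe on evergreen and later, 2 bits (mask 0x3) before. */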
1851 while (num_tile_pipes--) {
1852 i = backend_map & item_mask;
1853 mask |= (1<<i);
1854 backend_map >>= item_width;
1855 }
1856 if (mask != 0) {
1857 rscreen->info.enabled_rb_mask = mask;
1858 return;
1859 }
1860 }
1861
1862 /* Otherwise, fall back to detecting enabled RBs with a ZPASS_DONE event (older kernels). */
1863
1864 /* create buffer for event data */
1865 buffer = (struct r600_resource*)
1866 pipe_buffer_create(ctx->b.screen, 0,
1867 PIPE_USAGE_STAGING, max_rbs * 16);
1868 if (!buffer)
1869 return;
1870
1871 /* initialize buffer with zeroes */
1872 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1873 if (results) {
1874 memset(results, 0, max_rbs * 16);
1875
1876 /* emit EVENT_WRITE for ZPASS_DONE; each enabled RB writes a 16-byte result slot */
1877 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1878 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1879 radeon_emit(cs, buffer->gpu_address);
1880 radeon_emit(cs, buffer->gpu_address >> 32);
1881
1882 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1883 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1884
1885 /* analyze results */
1886 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1887 if (results) {
1888 for (i = 0; i < max_rbs; i++) {
1889 /* At least the highest bit will be set if the backend is used. */
1890 if (results[i*4 + 1])
1891 mask |= (1<<i);
1892 }
1893 }
1894 }
1895
1896 r600_resource_reference(&buffer, NULL);
1897
1898 if (mask)
1899 rscreen->info.enabled_rb_mask = mask;
1900 }
1901
1902 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1903 { \
1904 .name = name_, \
1905 .query_type = R600_QUERY_##query_type_, \
1906 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1907 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1908 .group_id = group_id_ \
1909 }
1910
1911 #define X(name_, query_type_, type_, result_type_) \
1912 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1913
1914 #define XG(group_, name_, query_type_, type_, result_type_) \
1915 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
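
/* For example, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to:
 *
 *    { .name = "draw-calls",
 *      .query_type = R600_QUERY_DRAW_CALLS,
 *      .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *      .group_id = ~(unsigned)0 }
 */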
1916
1917 static struct pipe_driver_query_info r600_driver_query_list[] = {
1918 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1919 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1920 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
1921 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1922 X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
1923 X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
1924 X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
1925 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1926 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1927 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1928 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1929 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
1930 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
1931 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
1932 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1933 X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
1934 X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
1935 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
1936 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
1937 X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
1938 X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
1939 X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
1940 X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
1941 X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
1942 X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
1943 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1944 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1945 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
1946 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
1947 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1948 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
1949 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
1950 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
1951 X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
1952 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1953 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
1954 X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
1955 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1956 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
1957 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1958
1959 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1960 * which use them as a fallback path to detect the GPU type.
1961 *
1962 * Note: The names of these queries are significant for GPUPerfStudio
1963 * (and possibly their order as well). */
1964 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1965 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1966 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1967 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1968 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1969
1970 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1971 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1972 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1973
1974 /* The following queries must be at the end of the list because their
1975 * availability is adjusted dynamically based on the DRM version. */
1976 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1977 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
1978 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
1979 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
1980 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
1981 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
1982 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
1983 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
1984 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
1985 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
1986 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
1987 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
1988 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
1989 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
1990 X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
1991 X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
1992 X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
1993 X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
1994 X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
1995 X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
1996 X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
1997 };
1998
1999 #undef X
2000 #undef XG
2001 #undef XFULL
2002
2003 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
2004 {
2005 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
2006 return ARRAY_SIZE(r600_driver_query_list);
2007 else
2008 return ARRAY_SIZE(r600_driver_query_list) - 25;
2009 }
2010
2011 static int r600_get_driver_query_info(struct pipe_screen *screen,
2012 unsigned index,
2013 struct pipe_driver_query_info *info)
2014 {
2015 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
2016 unsigned num_queries = r600_get_num_queries(rscreen);
2017
2018 if (!info) {
2019 unsigned num_perfcounters =
2020 r600_get_perfcounter_info(rscreen, 0, NULL);
2021
2022 return num_queries + num_perfcounters;
2023 }
2024
2025 if (index >= num_queries)
2026 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
2027
2028 *info = r600_driver_query_list[index];
2029
2030 switch (info->query_type) {
2031 case R600_QUERY_REQUESTED_VRAM:
2032 case R600_QUERY_VRAM_USAGE:
2033 case R600_QUERY_MAPPED_VRAM:
2034 info->max_value.u64 = rscreen->info.vram_size;
2035 break;
2036 case R600_QUERY_REQUESTED_GTT:
2037 case R600_QUERY_GTT_USAGE:
2038 case R600_QUERY_MAPPED_GTT:
2039 info->max_value.u64 = rscreen->info.gart_size;
2040 break;
2041 case R600_QUERY_GPU_TEMPERATURE:
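/* The temperature is reported in degrees Celsius. */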
2042 info->max_value.u64 = 125;
2043 break;
2044 case R600_QUERY_VRAM_VIS_USAGE:
2045 info->max_value.u64 = rscreen->info.vram_vis_size;
2046 break;
2047 }
2048
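/* Software query groups are exposed after all perfcounter groups (see
 * r600_get_driver_query_group_info), so shift the group id accordingly. */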
2049 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
2050 info->group_id += rscreen->perfcounters->num_groups;
2051
2052 return 1;
2053 }
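
/* A minimal sketch of enumerating these queries through the generic
 * pipe_screen interface (not driver-specific code); passing a NULL info
 * pointer requests the total count, as implemented above:
 *
 *    int n = screen->get_driver_query_info(screen, 0, NULL);
 *    for (int i = 0; i < n; i++) {
 *       struct pipe_driver_query_info info;
 *       if (screen->get_driver_query_info(screen, i, &info))
 *          printf("%s\n", info.name);
 *    }
 */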
2054
2055 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
2056 * performance counter groups, so be careful when changing this and related
2057 * functions.
2058 */
2059 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
2060 unsigned index,
2061 struct pipe_driver_query_group_info *info)
2062 {
2063 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
2064 unsigned num_pc_groups = 0;
2065
2066 if (rscreen->perfcounters)
2067 num_pc_groups = rscreen->perfcounters->num_groups;
2068
2069 if (!info)
2070 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
2071
2072 if (index < num_pc_groups)
2073 return r600_get_perfcounter_group_info(rscreen, index, info);
2074
2075 index -= num_pc_groups;
2076 if (index >= R600_NUM_SW_QUERY_GROUPS)
2077 return 0;
2078
2079 info->name = "GPIN";
2080 info->max_active_queries = 5;
2081 info->num_queries = 5;
2082 return 1;
2083 }
2084
2085 void r600_query_init(struct r600_common_context *rctx)
2086 {
2087 rctx->b.create_query = r600_create_query;
2088 rctx->b.create_batch_query = r600_create_batch_query;
2089 rctx->b.destroy_query = r600_destroy_query;
2090 rctx->b.begin_query = r600_begin_query;
2091 rctx->b.end_query = r600_end_query;
2092 rctx->b.get_query_result = r600_get_query_result;
2093 rctx->b.get_query_result_resource = r600_get_query_result_resource;
2094 rctx->render_cond_atom.emit = r600_emit_query_predication;
2095
2096 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
2097 rctx->b.render_condition = r600_render_condition;
2098
2099 LIST_INITHEAD(&rctx->active_queries);
2100 }
2101
2102 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
2103 {
2104 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
2105 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
2106 }