radeonsi: expose shader cache stats to the HUD
[mesa.git] / src / gallium / drivers / radeonsi / si_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/u_suballoc.h"
#include "amd/common/sid.h"

static const struct si_query_ops query_hw_ops;

struct si_hw_query_params {
   unsigned start_offset;
   unsigned end_offset;
   unsigned fence_offset;
   unsigned pair_stride;
   unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
   struct si_query b;

   uint64_t begin_result;
   uint64_t end_result;

   uint64_t begin_time;
   uint64_t end_time;

   /* Fence for GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};

static void si_query_sw_destroy(struct si_context *sctx,
                                struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;

   sctx->b.screen->fence_reference(sctx->b.screen, &query->fence, NULL);
   FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
   switch (type) {
   case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
   case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
   case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
   case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
   case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
   case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
   case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
   case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
   case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
   case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
   case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
   case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
   case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
   case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
   case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
   case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
   case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
   case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
   case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
   default: unreachable("query type does not correspond to winsys id");
   }
}

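/* Flush and synchronously wait for the SDMA engine, then sample the CPU
 * clock. Used by SI_QUERY_TIME_ELAPSED_SDMA_SI: older SI parts can't write
 * SDMA timestamps to memory, so the CPU time after a full SDMA drain is the
 * closest available approximation.
 */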
static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
{
   struct pipe_fence_handle *fence = NULL;

   si_flush_dma_cs(sctx, 0, &fence);
   if (fence) {
      sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
      sctx->ws->fence_reference(&fence, NULL);
   }

   return os_time_get_nano();
}

static bool si_query_sw_begin(struct si_context *sctx,
                              struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;
   enum radeon_value_id ws_id;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->begin_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->begin_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->begin_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->begin_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->begin_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->begin_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->begin_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->begin_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->begin_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->begin_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->begin_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->begin_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->begin_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->begin_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->begin_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->begin_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->begin_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->begin_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
      query->begin_result = 0;
      break;
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = sctx->ws->query_value(sctx->ws,
                                                RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->begin_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->begin_result = si_begin_counter(sctx->screen,
                                             query->b.type);
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_LIVE_SHADER_CACHE_HITS:
      query->begin_result = sctx->screen->live_shader_cache.hits;
      break;
   case SI_QUERY_LIVE_SHADER_CACHE_MISSES:
      query->begin_result = sctx->screen->live_shader_cache.misses;
      break;
   case SI_QUERY_MEMORY_SHADER_CACHE_HITS:
      query->begin_result = sctx->screen->num_memory_shader_cache_hits;
      break;
   case SI_QUERY_MEMORY_SHADER_CACHE_MISSES:
      query->begin_result = sctx->screen->num_memory_shader_cache_misses;
      break;
   case SI_QUERY_DISK_SHADER_CACHE_HITS:
      query->begin_result = sctx->screen->num_disk_shader_cache_hits;
      break;
   case SI_QUERY_DISK_SHADER_CACHE_MISSES:
      query->begin_result = sctx->screen->num_disk_shader_cache_misses;
      break;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
      query->begin_result = sctx->compute_num_verts_accepted;
      break;
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
      query->begin_result = sctx->compute_num_verts_rejected;
      break;
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      query->begin_result = sctx->compute_num_verts_ineligible;
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_begin: bad query type");
   }

   return true;
}

static bool si_query_sw_end(struct si_context *sctx,
                            struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;
   enum radeon_value_id ws_id;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_GPU_FINISHED:
      sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->end_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->end_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->end_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->end_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->end_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->end_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->end_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->end_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->end_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->end_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->end_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->end_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->end_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->end_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->end_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->end_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->end_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->end_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = sctx->ws->query_value(sctx->ws,
                                              RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->end_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->end_result = si_end_counter(sctx->screen,
                                         query->b.type,
                                         query->begin_result);
      query->begin_result = 0;
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->end_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
      query->end_result = sctx->last_tex_ps_draw_ratio;
      break;
   case SI_QUERY_LIVE_SHADER_CACHE_HITS:
      query->end_result = sctx->screen->live_shader_cache.hits;
      break;
   case SI_QUERY_LIVE_SHADER_CACHE_MISSES:
      query->end_result = sctx->screen->live_shader_cache.misses;
      break;
   case SI_QUERY_MEMORY_SHADER_CACHE_HITS:
      query->end_result = sctx->screen->num_memory_shader_cache_hits;
      break;
   case SI_QUERY_MEMORY_SHADER_CACHE_MISSES:
      query->end_result = sctx->screen->num_memory_shader_cache_misses;
      break;
   case SI_QUERY_DISK_SHADER_CACHE_HITS:
      query->end_result = sctx->screen->num_disk_shader_cache_hits;
      break;
   case SI_QUERY_DISK_SHADER_CACHE_MISSES:
      query->end_result = sctx->screen->num_disk_shader_cache_misses;
      break;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
      query->end_result = sctx->compute_num_verts_accepted;
      break;
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
      query->end_result = sctx->compute_num_verts_rejected;
      break;
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      query->end_result = sctx->compute_num_verts_ineligible;
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_end: bad query type");
   }

   return true;
}

static bool si_query_sw_get_result(struct si_context *sctx,
                                   struct si_query *squery,
                                   bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* Convert from cycles per millisecond to cycles per second (Hz). */
      result->timestamp_disjoint.frequency =
         (uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
      result->timestamp_disjoint.disjoint = false;
      return true;
   case PIPE_QUERY_GPU_FINISHED: {
      struct pipe_screen *screen = sctx->b.screen;
      struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;

      result->b = screen->fence_finish(screen, ctx, query->fence,
                                       wait ? PIPE_TIMEOUT_INFINITE : 0);
      return result->b;
   }

   case SI_QUERY_GFX_BO_LIST_SIZE:
      result->u64 = (query->end_result - query->begin_result) /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_CS_THREAD_BUSY:
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      result->u64 = (query->end_result - query->begin_result) * 100 /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      result->u64 = ((unsigned)query->end_result -
                     (unsigned)query->begin_result) / 3;
      return true;
   case SI_QUERY_GPIN_ASIC_ID:
      result->u32 = 0;
      return true;
   case SI_QUERY_GPIN_NUM_SIMD:
      result->u32 = sctx->screen->info.num_good_compute_units;
      return true;
   case SI_QUERY_GPIN_NUM_RB:
      result->u32 = sctx->screen->info.num_render_backends;
      return true;
   case SI_QUERY_GPIN_NUM_SPI:
      result->u32 = 1; /* all supported chips have one SPI per SE */
      return true;
   case SI_QUERY_GPIN_NUM_SE:
      result->u32 = sctx->screen->info.max_se;
      return true;
   }

   result->u64 = query->end_result - query->begin_result;

   switch (query->b.type) {
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GPU_TEMPERATURE:
      result->u64 /= 1000;
      break;
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
      result->u64 *= 1000000;
      break;
   }

   return true;
}

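/* SW queries never touch the GPU: begin/end snapshot a driver or winsys
 * counter, and get_result reports the delta (plus a few unit conversions).
 */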
static const struct si_query_ops sw_query_ops = {
   .destroy = si_query_sw_destroy,
   .begin = si_query_sw_begin,
   .end = si_query_sw_end,
   .get_result = si_query_sw_get_result,
   .get_result_resource = NULL
};

static struct pipe_query *si_query_sw_create(unsigned query_type)
{
   struct si_query_sw *query;

   query = CALLOC_STRUCT(si_query_sw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &sw_query_ops;

   return (struct pipe_query *)query;
}

void si_query_buffer_destroy(struct si_screen *sscreen, struct si_query_buffer *buffer)
{
   struct si_query_buffer *prev = buffer->previous;

   /* Release all query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      si_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   si_resource_reference(&buffer->buf, NULL);
}

void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
{
   /* Discard all query buffers except for the oldest. */
   while (buffer->previous) {
      struct si_query_buffer *qbuf = buffer->previous;
      buffer->previous = qbuf->previous;

      si_resource_reference(&buffer->buf, NULL);
      buffer->buf = qbuf->buf; /* move ownership */
      FREE(qbuf);
   }
   buffer->results_end = 0;

   if (!buffer->buf)
      return;

   /* Discard even the oldest buffer if it can't be mapped without a stall. */
   if (si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
      si_resource_reference(&buffer->buf, NULL);
   } else {
      buffer->unprepared = true;
   }
}

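/* Ensure there is room for one more result of the given size. When the
 * current buffer fills up, it is pushed onto the ->previous chain and a
 * fresh buffer becomes the head; readers later walk the whole chain.
 */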
bool si_query_buffer_alloc(struct si_context *sctx, struct si_query_buffer *buffer,
                           bool (*prepare_buffer)(struct si_context *, struct si_query_buffer *),
                           unsigned size)
{
   bool unprepared = buffer->unprepared;
   buffer->unprepared = false;

   if (!buffer->buf || buffer->results_end + size > buffer->buf->b.b.width0) {
      if (buffer->buf) {
         struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
         memcpy(qbuf, buffer, sizeof(*qbuf));
         buffer->previous = qbuf;
      }
      buffer->results_end = 0;

      /* Queries are normally read by the CPU after being written by the
       * GPU, so PIPE_USAGE_STAGING is a good usage pattern here.
       */
      struct si_screen *screen = sctx->screen;
      unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
      buffer->buf = si_resource(
         pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
      if (unlikely(!buffer->buf))
         return false;
      unprepared = true;
   }

   if (unprepared && prepare_buffer) {
      if (unlikely(!prepare_buffer(sctx, buffer))) {
         si_resource_reference(&buffer->buf, NULL);
         return false;
      }
   }

   return true;
}


void si_query_hw_destroy(struct si_context *sctx, struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   si_query_buffer_destroy(sctx->screen, &query->buffer);
   si_resource_reference(&query->workaround_buf, NULL);
   FREE(squery);
}

static bool si_query_hw_prepare_buffer(struct si_context *sctx,
                                       struct si_query_buffer *qbuf)
{
   static const struct si_query_hw si_query_hw_s;
   struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
   struct si_screen *screen = sctx->screen;

   /* The caller ensures that the buffer is currently unused by the GPU. */
   uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
                                              PIPE_TRANSFER_WRITE |
                                              PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!results)
      return false;

   memset(results, 0, qbuf->buf->b.b.width0);

   if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      unsigned max_rbs = screen->info.num_render_backends;
      unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
      unsigned num_results;
      unsigned i, j;

      /* Set top bits for unused backends. */
      num_results = qbuf->buf->b.b.width0 / query->result_size;
      for (j = 0; j < num_results; j++) {
         for (i = 0; i < max_rbs; i++) {
            if (!(enabled_rb_mask & (1 << i))) {
               results[(i * 4) + 1] = 0x80000000;
               results[(i * 4) + 3] = 0x80000000;
            }
         }
         results += 4 * max_rbs;
      }
   }

   return true;
}

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *squery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset);

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct si_resource *buffer,
                                      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct si_resource *buffer,
                                     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *, void *buffer,
                                   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
                                     union pipe_query_result *);

static struct si_query_hw_ops query_hw_default_hw_ops = {
   .prepare_buffer = si_query_hw_prepare_buffer,
   .emit_start = si_query_hw_do_emit_start,
   .emit_stop = si_query_hw_do_emit_stop,
   .clear_result = si_query_hw_clear_result,
   .add_result = si_query_hw_add_result,
};

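/* result_size is the per-sample footprint in the query buffer: begin/end
 * value pairs (one pair per render backend for occlusion queries, one per
 * stream for streamout), plus room for the 32-bit completion fence where
 * one is written.
 */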
static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
                                             unsigned query_type,
                                             unsigned index)
{
   struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &query_hw_ops;
   query->ops = &query_hw_default_hw_ops;

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      query->result_size = 16 * sscreen->info.num_render_backends;
      query->result_size += 16; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      /* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
      query->result_size = 64;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      query->result_size = 24;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIMESTAMP:
      query->result_size = 16;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      query->flags = SI_QUERY_HW_FLAG_NO_START;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32;
      query->b.num_cs_dw_suspend = 6;
      query->stream = index;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32 * SI_MAX_STREAMS;
      query->b.num_cs_dw_suspend = 6 * SI_MAX_STREAMS;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 11 values on GCN. */
      query->result_size = 11 * 16;
      query->result_size += 8; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   default:
      assert(0);
      FREE(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}

static void si_update_occlusion_query_state(struct si_context *sctx,
                                            unsigned type, int diff)
{
   if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      bool old_enable = sctx->num_occlusion_queries != 0;
      bool old_perfect_enable =
         sctx->num_perfect_occlusion_queries != 0;
      bool enable, perfect_enable;

      sctx->num_occlusion_queries += diff;
      assert(sctx->num_occlusion_queries >= 0);

      if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         sctx->num_perfect_occlusion_queries += diff;
         assert(sctx->num_perfect_occlusion_queries >= 0);
      }

      enable = sctx->num_occlusion_queries != 0;
      perfect_enable = sctx->num_perfect_occlusion_queries != 0;

      if (enable != old_enable || perfect_enable != old_perfect_enable) {
         si_set_occlusion_query_state(sctx, old_perfect_enable);
      }
   }
}

static unsigned event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
                                  unsigned stream)
{
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
   radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
}

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct si_resource *buffer,
                                      uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);
}

static void si_query_hw_emit_start(struct si_context *sctx,
                                   struct si_query_hw *query)
{
   uint64_t va;

   if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                              query->result_size))
      return;

   si_update_occlusion_query_state(sctx, query->b.type, 1);
   si_update_prims_generated_query_state(sctx, query->b.type, 1);

   if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
      sctx->num_pipeline_stat_queries++;

   if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
      si_need_gfx_cs_space(sctx);

   va = query->buffer.buf->gpu_address + query->buffer.results_end;
   query->ops->emit_start(sctx, query, query->buffer.buf, va);
}

static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct si_resource *buffer,
                                     uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t fence_va = 0;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      va += 8;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      va += 16;
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      va += 16;
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      va += 8;
      /* fall through */
   case PIPE_QUERY_TIMESTAMP:
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      fence_va = va + 8;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      unsigned sample_size = (query->result_size - 8) / 2;

      va += sample_size;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sample_size;
      break;
   }
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);

   if (fence_va) {
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_VALUE_32BIT,
                        query->buffer.buf, fence_va, 0x80000000,
                        query->b.type);
   }
}

static void si_query_hw_emit_stop(struct si_context *sctx,
                                  struct si_query_hw *query)
{
   uint64_t va;

   /* Queries that have a begin already allocated their buffer in begin_query. */
   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      si_need_gfx_cs_space(sctx);
      if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                                 query->result_size))
         return;
   }

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   /* emit end query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_stop(sctx, query, query->buffer.buf, va);

   query->buffer.results_end += query->result_size;

   si_update_occlusion_query_state(sctx, query->b.type, -1);
   si_update_prims_generated_query_state(sctx, query->b.type, -1);

   if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
      sctx->num_pipeline_stat_queries--;
}

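/* Emit SET_PREDICATION. GFX9+ uses the longer encoding with the operation
 * in its own dword; older chips pack the operation bits together with the
 * high VA byte into the last dword.
 */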
static void emit_set_predicate(struct si_context *ctx,
                               struct si_resource *buf, uint64_t va,
                               uint32_t op)
{
   struct radeon_cmdbuf *cs = ctx->gfx_cs;

   if (ctx->chip_class >= GFX9) {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
   } else {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, op | ((va >> 32) & 0xFF));
   }
   radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
                             RADEON_PRIO_QUERY);
}

static void si_emit_query_predication(struct si_context *ctx)
{
   struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
   struct si_query_buffer *qbuf;
   uint32_t op;
   bool flag_wait, invert;

   if (!query)
      return;

   if (ctx->screen->use_ngg_streamout &&
       (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
        query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)) {
      assert(!"not implemented");
   }

   invert = ctx->render_cond_invert;
   flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
               ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

   if (query->workaround_buf) {
      op = PRED_OP(PREDICATION_OP_BOOL64);
   } else {
      switch (query->b.type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         op = PRED_OP(PREDICATION_OP_ZPASS);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
         invert = !invert;
         break;
      default:
         assert(0);
         return;
      }
   }

   /* if true then invert, see GL_ARB_conditional_render_inverted */
   if (invert)
      op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
   else
      op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

   /* Use the value written by compute shader as a workaround. Note that
    * the wait flag does not apply in this predication mode.
    *
    * The shader outputs the result value to L2. Workarounds only affect GFX8
    * and later, where the CP reads data from L2, so we don't need an
    * additional flush.
    */
   if (query->workaround_buf) {
      uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
      emit_set_predicate(ctx, query->workaround_buf, va, op);
      return;
   }

   op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

   /* emit predicate packets for all data blocks */
   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned results_base = 0;
      uint64_t va_base = qbuf->buf->gpu_address;

      while (results_base < qbuf->results_end) {
         uint64_t va = va_base + results_base;

         if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
            for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
               emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

               /* set CONTINUE bit for all packets except the first */
               op |= PREDICATION_CONTINUE;
            }
         } else {
            emit_set_predicate(ctx, qbuf->buf, va, op);
            op |= PREDICATION_CONTINUE;
         }

         results_base += query->result_size;
      }
   }
}

static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;

   if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
       query_type == PIPE_QUERY_GPU_FINISHED ||
       (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
        query_type != SI_QUERY_TIME_ELAPSED_SDMA))
      return si_query_sw_create(query_type);

   if (sscreen->use_ngg_streamout &&
       (query_type == PIPE_QUERY_PRIMITIVES_EMITTED ||
        query_type == PIPE_QUERY_PRIMITIVES_GENERATED ||
        query_type == PIPE_QUERY_SO_STATISTICS ||
        query_type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
        query_type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE))
      return gfx10_sh_query_create(sscreen, query_type, index);

   return si_query_hw_create(sscreen, query_type, index);
}

static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   squery->ops->destroy(sctx, squery);
}

static bool si_begin_query(struct pipe_context *ctx,
                           struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->begin(sctx, squery);
}

bool si_query_hw_begin(struct si_context *sctx,
                       struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      assert(0);
      return false;
   }

   if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
      si_query_buffer_reset(sctx, &query->buffer);

   si_resource_reference(&query->workaround_buf, NULL);

   si_query_hw_emit_start(sctx, query);
   if (!query->buffer.buf)
      return false;

   list_addtail(&query->b.active_list, &sctx->active_queries);
   sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
   return true;
}

static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->end(sctx, squery);
}

bool si_query_hw_end(struct si_context *sctx,
                     struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_query_buffer_reset(sctx, &query->buffer);

   si_query_hw_emit_stop(sctx, query);

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START)) {
      list_delinit(&query->b.active_list);
      sctx->num_cs_dw_queries_suspend -= query->b.num_cs_dw_suspend;
   }

   if (!query->buffer.buf)
      return false;

   return true;
}

static void si_get_hw_query_params(struct si_context *sctx,
                                   struct si_query_hw *squery, int index,
                                   struct si_hw_query_params *params)
{
   unsigned max_rbs = sctx->screen->info.num_render_backends;

   params->pair_stride = 0;
   params->pair_count = 1;

   switch (squery->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = max_rbs * 16;
      params->pair_stride = 16;
      params->pair_count = max_rbs;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = 16;
      break;
   case PIPE_QUERY_TIMESTAMP:
      params->start_offset = 0;
      params->end_offset = 0;
      params->fence_offset = 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      params->start_offset = 8;
      params->end_offset = 24;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      params->start_offset = 0;
      params->end_offset = 16;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      params->start_offset = 8 - index * 8;
      params->end_offset = 24 - index * 8;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      params->pair_count = SI_MAX_STREAMS;
      params->pair_stride = 32;
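      /* fall through */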
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      params->start_offset = 0;
      params->end_offset = 16;

      /* We can re-use the high dword of the last 64-bit value as a
       * fence: it is initialized as 0, and the high bit is set by
       * the write of the streamout stats event.
       */
      params->fence_offset = squery->result_size - 4;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
   {
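      /* The hardware writes the SAMPLE_PIPELINESTAT block in a fixed
       * order; offsets[] maps each gallium pipeline-statistics index to
       * its byte offset within that block.
       */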
      static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
      params->start_offset = offsets[index];
      params->end_offset = 88 + offsets[index];
      params->fence_offset = 2 * 88;
      break;
   }
   default:
      unreachable("si_get_hw_query_params unsupported");
   }
}

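/* Read one begin/end pair of 64-bit counters and return the difference.
 * When test_status_bit is set, the pair is only counted if bit 63 of both
 * samples is set (the hardware sets it once the sample has been written);
 * otherwise 0 is returned.
 */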
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                     bool test_status_bit)
{
   uint32_t *current_result = (uint32_t *)map;
   uint64_t start, end;

   start = (uint64_t)current_result[start_index] |
           (uint64_t)current_result[start_index + 1] << 32;
   end = (uint64_t)current_result[end_index] |
         (uint64_t)current_result[end_index + 1] << 32;

   if (!test_status_bit ||
       ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
      return end - start;
   }
   return 0;
}

static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   unsigned max_rbs = sscreen->info.num_render_backends;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->u64 +=
            si_query_read_result(buffer + results_base, 0, 2, true);
      }
      break;
   }
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->b = result->b ||
            si_query_read_result(buffer + results_base, 0, 2, true) != 0;
      }
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 += si_query_read_result(buffer, 0, 2, false);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      result->u64 += si_query_read_result(buffer, 0, 32 / 4, false);
      break;
   case PIPE_QUERY_TIMESTAMP:
      result->u64 = *(uint64_t *)buffer;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* SAMPLE_STREAMOUTSTATS stores this structure:
       * {
       *    u64 NumPrimitivesWritten;
       *    u64 PrimitiveStorageNeeded;
       * }
       * We only need NumPrimitivesWritten here. */
      result->u64 += si_query_read_result(buffer, 2, 6, true);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* Here we read PrimitiveStorageNeeded. */
      result->u64 += si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written +=
         si_query_read_result(buffer, 2, 6, true);
      result->so_statistics.primitives_storage_needed +=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = result->b ||
         si_query_read_result(buffer, 2, 6, true) !=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
         result->b = result->b ||
            si_query_read_result(buffer, 2, 6, true) !=
            si_query_read_result(buffer, 0, 4, true);
         buffer = (char *)buffer + 32;
      }
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      result->pipeline_statistics.ps_invocations +=
         si_query_read_result(buffer, 0, 22, false);
      result->pipeline_statistics.c_primitives +=
         si_query_read_result(buffer, 2, 24, false);
      result->pipeline_statistics.c_invocations +=
         si_query_read_result(buffer, 4, 26, false);
      result->pipeline_statistics.vs_invocations +=
         si_query_read_result(buffer, 6, 28, false);
      result->pipeline_statistics.gs_invocations +=
         si_query_read_result(buffer, 8, 30, false);
      result->pipeline_statistics.gs_primitives +=
         si_query_read_result(buffer, 10, 32, false);
      result->pipeline_statistics.ia_primitives +=
         si_query_read_result(buffer, 12, 34, false);
      result->pipeline_statistics.ia_vertices +=
         si_query_read_result(buffer, 14, 36, false);
      result->pipeline_statistics.hs_invocations +=
         si_query_read_result(buffer, 16, 38, false);
      result->pipeline_statistics.ds_invocations +=
         si_query_read_result(buffer, 18, 40, false);
      result->pipeline_statistics.cs_invocations +=
         si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
      printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
             "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
             "Clipper prims=%llu, PS=%llu, CS=%llu\n",
             result->pipeline_statistics.ia_vertices,
             result->pipeline_statistics.ia_primitives,
             result->pipeline_statistics.vs_invocations,
             result->pipeline_statistics.hs_invocations,
             result->pipeline_statistics.ds_invocations,
             result->pipeline_statistics.gs_invocations,
             result->pipeline_statistics.gs_primitives,
             result->pipeline_statistics.c_invocations,
             result->pipeline_statistics.c_primitives,
             result->pipeline_statistics.ps_invocations,
             result->pipeline_statistics.cs_invocations);
#endif
      break;
   default:
      assert(0);
   }
}

void si_query_hw_suspend(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_stop(sctx, (struct si_query_hw *)query);
}

void si_query_hw_resume(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_start(sctx, (struct si_query_hw *)query);
}

static const struct si_query_ops query_hw_ops = {
   .destroy = si_query_hw_destroy,
   .begin = si_query_hw_begin,
   .end = si_query_hw_end,
   .get_result = si_query_hw_get_result,
   .get_result_resource = si_query_hw_get_result_resource,

   .suspend = si_query_hw_suspend,
   .resume = si_query_hw_resume,
};

static bool si_get_query_result(struct pipe_context *ctx,
                                struct pipe_query *query, bool wait,
                                union pipe_query_result *result)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->get_result(sctx, squery, wait, result);
}

static void si_get_query_result_resource(struct pipe_context *ctx,
                                         struct pipe_query *query,
                                         bool wait,
                                         enum pipe_query_value_type result_type,
                                         int index,
                                         struct pipe_resource *resource,
                                         unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   squery->ops->get_result_resource(sctx, squery, wait, result_type, index,
                                    resource, offset);
}

static void si_query_hw_clear_result(struct si_query_hw *query,
                                     union pipe_query_result *result)
{
   util_query_clear_result(result, query->b.type);
}

bool si_query_hw_get_result(struct si_context *sctx,
                            struct si_query *squery,
                            bool wait, union pipe_query_result *result)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_query_hw *query = (struct si_query_hw *)squery;
   struct si_query_buffer *qbuf;

   query->ops->clear_result(query, result);

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (squery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         query->ops->add_result(sscreen, query, map + results_base,
                                result);
         results_base += query->result_size;
      }
   }

   /* Convert the time to expected units. */
   if (squery->type == PIPE_QUERY_TIME_ELAPSED ||
       squery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
       squery->type == PIPE_QUERY_TIMESTAMP) {
      result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
   }
   return true;
}

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *squery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;
   struct si_query_buffer *qbuf;
   struct si_query_buffer *qbuf_prev;
   struct pipe_resource *tmp_buffer = NULL;
   unsigned tmp_buffer_offset = 0;
   struct si_qbo_state saved_state = {};
   struct pipe_grid_info grid = {};
   struct pipe_constant_buffer constant_buffer = {};
   struct pipe_shader_buffer ssbo[3];
   struct si_hw_query_params params;
   struct {
      uint32_t end_offset;
      uint32_t result_stride;
      uint32_t result_count;
      uint32_t config;
      uint32_t fence_offset;
      uint32_t pair_stride;
      uint32_t pair_count;
   } consts;

   if (!sctx->query_result_shader) {
      sctx->query_result_shader = si_create_query_result_cs(sctx);
      if (!sctx->query_result_shader)
         return;
   }

   if (query->buffer.previous) {
      u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
                           &tmp_buffer_offset, &tmp_buffer);
      if (!tmp_buffer)
         return;
   }

   si_save_qbo_state(sctx, &saved_state);

   si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
   consts.end_offset = params.end_offset - params.start_offset;
   consts.fence_offset = params.fence_offset - params.start_offset;
   consts.result_stride = query->result_size;
   consts.pair_stride = params.pair_stride;
   consts.pair_count = params.pair_count;

   constant_buffer.buffer_size = sizeof(consts);
   constant_buffer.user_buffer = &consts;

   ssbo[1].buffer = tmp_buffer;
   ssbo[1].buffer_offset = tmp_buffer_offset;
   ssbo[1].buffer_size = 16;

   ssbo[2] = ssbo[1];

   sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

   grid.block[0] = 1;
   grid.block[1] = 1;
   grid.block[2] = 1;
   grid.grid[0] = 1;
   grid.grid[1] = 1;
   grid.grid[2] = 1;

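   /* Bits of consts.config as consumed by the query-result compute shader
    * (see si_create_query_result_cs):
    *    1: read previously accumulated values
    *    2: write accumulated values for chaining
    *    4: no result index given, write "result available"
    *    8: convert the result to a boolean (0/1)
    *   16: only read one dword and use it as the result
    *   32: apply the timestamp conversion
    *   64: store a full 64-bit result
    *  128: store a signed 32-bit result
    *  256: SO_OVERFLOW mode, compare the two streamout counters
    */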
   consts.config = 0;
   if (index < 0)
      consts.config |= 4;
   if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
      consts.config |= 8;
   else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      consts.config |= 8 | 256;
   else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
            query->b.type == PIPE_QUERY_TIME_ELAPSED)
      consts.config |= 32;

   switch (result_type) {
   case PIPE_QUERY_TYPE_U64:
   case PIPE_QUERY_TYPE_I64:
      consts.config |= 64;
      break;
   case PIPE_QUERY_TYPE_I32:
      consts.config |= 128;
      break;
   case PIPE_QUERY_TYPE_U32:
      break;
   }

   sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
      if (query->b.type != PIPE_QUERY_TIMESTAMP) {
         qbuf_prev = qbuf->previous;
         consts.result_count = qbuf->results_end / query->result_size;
         consts.config &= ~3;
         if (qbuf != &query->buffer)
            consts.config |= 1;
         if (qbuf->previous)
            consts.config |= 2;
      } else {
         /* Only read the last timestamp. */
         qbuf_prev = NULL;
         consts.result_count = 0;
         consts.config |= 16;
         params.start_offset += qbuf->results_end - query->result_size;
      }

      sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

      ssbo[0].buffer = &qbuf->buf->b.b;
      ssbo[0].buffer_offset = params.start_offset;
      ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

      if (!qbuf->previous) {
         ssbo[2].buffer = resource;
         ssbo[2].buffer_offset = offset;
         ssbo[2].buffer_size = 8;

         si_resource(resource)->TC_L2_dirty = true;
      }

      sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo,
                                 1 << 2);

      if (wait && qbuf == &query->buffer) {
         uint64_t va;

         /* Wait for result availability. Wait only for readiness
          * of the last entry, since the fence writes should be
          * serialized in the CP.
          */
         va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
         va += params.fence_offset;

         si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x80000000,
                        0x80000000, WAIT_REG_MEM_EQUAL);
      }

      sctx->b.launch_grid(&sctx->b, &grid);
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   si_restore_qbo_state(sctx, &saved_state);
   pipe_resource_reference(&tmp_buffer, NULL);
}

static void si_render_condition(struct pipe_context *ctx,
                                struct pipe_query *query,
                                bool condition,
                                enum pipe_render_cond_flag mode)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query_hw *squery = (struct si_query_hw *)query;
   struct si_atom *atom = &sctx->atoms.s.render_cond;

   if (query) {
      bool needs_workaround = false;

      /* There was a firmware regression in GFX8 which causes successive
       * SET_PREDICATION packets to give the wrong answer for
       * non-inverted stream overflow predication.
       */
      if (((sctx->chip_class == GFX8 && sctx->screen->info.pfp_fw_feature < 49) ||
           (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
          !condition &&
          (squery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
           (squery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
            (squery->buffer.previous ||
             squery->buffer.results_end > squery->result_size)))) {
         needs_workaround = true;
      }

      if (needs_workaround && !squery->workaround_buf) {
         bool old_force_off = sctx->render_cond_force_off;
         sctx->render_cond_force_off = true;

         u_suballocator_alloc(
            sctx->allocator_zeroed_memory, 8, 8,
            &squery->workaround_offset,
            (struct pipe_resource **)&squery->workaround_buf);

         /* Reset to NULL to avoid a redundant SET_PREDICATION
          * from launching the compute grid.
          */
         sctx->render_cond = NULL;

         ctx->get_query_result_resource(
            ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
            &squery->workaround_buf->b.b, squery->workaround_offset);

         /* Setting this in the render cond atom is too late,
          * so set it here. */
1700 sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
1701 SI_CONTEXT_FLUSH_FOR_RENDER_COND;
1702
1703 sctx->render_cond_force_off = old_force_off;
1704 }
1705 }
1706
1707 sctx->render_cond = query;
1708 sctx->render_cond_invert = condition;
1709 sctx->render_cond_mode = mode;
1710
1711 si_set_atom_dirty(sctx, atom, query != NULL);
1712 }
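
/* For context, conditional rendering reaches this hook through the
 * gallium frontend; a minimal sketch of a caller (occlusion_query is a
 * hypothetical handle):
 *
 *    ctx->render_condition(ctx, occlusion_query, false,
 *                          PIPE_RENDER_COND_WAIT);
 *    ... draws here are discarded if the query result is zero ...
 *    ctx->render_condition(ctx, NULL, false, PIPE_RENDER_COND_WAIT);
 *
 * Passing query == NULL disables predication again, which is why the
 * atom is marked dirty only while query != NULL.
 */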

void si_suspend_queries(struct si_context *sctx)
{
   struct si_query *query;

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->suspend(sctx, query);
}

void si_resume_queries(struct si_context *sctx)
{
   struct si_query *query;

   /* Check CS space here. Resuming must not be interrupted by flushes. */
   si_need_gfx_cs_space(sctx);

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->resume(sctx, query);
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
   { \
      .name = name_, \
      .query_type = SI_QUERY_##query_type_, \
      .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
      .group_id = group_id_ \
   }

#define X(name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)

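/* For illustration, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands
 * to the designated initializer:
 *
 *    { .name = "draw-calls",
 *      .query_type = SI_QUERY_DRAW_CALLS,
 *      .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *      .group_id = ~(unsigned)0 }
 *
 * where ~(unsigned)0 means the query belongs to no group.
 */
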
static struct pipe_driver_query_info si_driver_query_list[] = {
   X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
   X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
   X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
   X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
   X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
   X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
   X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
   X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
   X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
   X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
   X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
   X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
   X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
   X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
   X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
   X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
   X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
   X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
   X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
   X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
   X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
   X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
   X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
   X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
   X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
   X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
   X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
   X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
   X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
   X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
   X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
   X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
   X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
   X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
   X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
   X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
   X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
   X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
   X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
   X("live-shader-cache-hits", LIVE_SHADER_CACHE_HITS, UINT, CUMULATIVE),
   X("live-shader-cache-misses", LIVE_SHADER_CACHE_MISSES, UINT, CUMULATIVE),
   X("memory-shader-cache-hits", MEMORY_SHADER_CACHE_HITS, UINT, CUMULATIVE),
   X("memory-shader-cache-misses", MEMORY_SHADER_CACHE_MISSES, UINT, CUMULATIVE),
   X("disk-shader-cache-hits", DISK_SHADER_CACHE_HITS, UINT, CUMULATIVE),
   X("disk-shader-cache-misses", DISK_SHADER_CACHE_MISSES, UINT, CUMULATIVE),
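
   /* The shader cache counters above can be graphed with the Gallium HUD,
    * e.g. GALLIUM_HUD=live-shader-cache-hits,disk-shader-cache-misses;
    * HUD graph names match the driver query names in this list.
    */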

   /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
    * which use them as a fallback path to detect the GPU type.
    *
    * Note: The names of these queries are significant for GPUPerfStudio
    * (and possibly their order as well). */
   XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
   XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
   XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
   XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
   XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

   X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
   X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
   X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

   /* The following queries must be at the end of the list because their
    * availability is adjusted dynamically based on the DRM version. */
   X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
   X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
   X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
   X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
   X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
   X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
   X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
   X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
   X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
   X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
   X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
   X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
   X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
   X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),

   /* SRBM_STATUS2 */
   X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),

   /* CP_STAT */
   X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
   X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
   X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
   X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
   X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
   X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),

   X("pd-num-prims-accepted", PD_NUM_PRIMS_ACCEPTED, UINT64, AVERAGE),
   X("pd-num-prims-rejected", PD_NUM_PRIMS_REJECTED, UINT64, AVERAGE),
   X("pd-num-prims-ineligible", PD_NUM_PRIMS_INELIGIBLE, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

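/* The register-based queries at the tail of si_driver_query_list depend on
 * kernel support; the constants subtracted below trim that tail when the
 * winsys cannot provide the reads (counts presumably kept in sync with the
 * list above by hand).
 */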
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
   /* amdgpu */
   if (sscreen->info.is_amdgpu) {
      if (sscreen->info.chip_class >= GFX8)
         return ARRAY_SIZE(si_driver_query_list);
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   /* radeon */
   if (sscreen->info.has_read_registers_query) {
      if (sscreen->info.chip_class == GFX7)
         return ARRAY_SIZE(si_driver_query_list) - 6;
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   return ARRAY_SIZE(si_driver_query_list) - 21;
}

static int si_get_driver_query_info(struct pipe_screen *screen,
                                    unsigned index,
                                    struct pipe_driver_query_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_queries = si_get_num_queries(sscreen);

   if (!info) {
      unsigned num_perfcounters =
         si_get_perfcounter_info(sscreen, 0, NULL);

      return num_queries + num_perfcounters;
   }

   if (index >= num_queries)
      return si_get_perfcounter_info(sscreen, index - num_queries, info);

   *info = si_driver_query_list[index];

   switch (info->query_type) {
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_MAPPED_VRAM:
      info->max_value.u64 = sscreen->info.vram_size;
      break;
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_MAPPED_GTT:
      info->max_value.u64 = sscreen->info.gart_size;
      break;
   case SI_QUERY_GPU_TEMPERATURE:
      info->max_value.u64 = 125;
      break;
   case SI_QUERY_VRAM_VIS_USAGE:
      info->max_value.u64 = sscreen->info.vram_vis_size;
      break;
   }

   if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
      info->group_id += sscreen->perfcounters->num_groups;

   return 1;
}

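/* A frontend such as the HUD enumerates these queries by first calling
 * get_driver_query_info with info == NULL to get the count, then once
 * per index; a minimal sketch:
 *
 *    int n = screen->get_driver_query_info(screen, 0, NULL);
 *    for (int i = 0; i < n; i++) {
 *       struct pipe_driver_query_info info;
 *       if (screen->get_driver_query_info(screen, i, &info))
 *          printf("%s\n", info.name);
 *    }
 */
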
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
                                          unsigned index,
                                          struct pipe_driver_query_group_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_pc_groups = 0;

   if (sscreen->perfcounters)
      num_pc_groups = sscreen->perfcounters->num_groups;

   if (!info)
      return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

   if (index < num_pc_groups)
      return si_get_perfcounter_group_info(sscreen, index, info);

   index -= num_pc_groups;
   if (index >= SI_NUM_SW_QUERY_GROUPS)
      return 0;

   info->name = "GPIN";
   info->max_active_queries = 5;
   info->num_queries = 5;
   return 1;
}

void si_init_query_functions(struct si_context *sctx)
{
   sctx->b.create_query = si_create_query;
   sctx->b.create_batch_query = si_create_batch_query;
   sctx->b.destroy_query = si_destroy_query;
   sctx->b.begin_query = si_begin_query;
   sctx->b.end_query = si_end_query;
   sctx->b.get_query_result = si_get_query_result;
   sctx->b.get_query_result_resource = si_get_query_result_resource;

   if (sctx->has_graphics) {
      sctx->atoms.s.render_cond.emit = si_emit_query_predication;
      sctx->b.render_condition = si_render_condition;
   }

   list_inithead(&sctx->active_queries);
}

void si_init_screen_query_functions(struct si_screen *sscreen)
{
   sscreen->b.get_driver_query_info = si_get_driver_query_info;
   sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}