radv/winsys: Set winsys bo priority on creation.
[mesa.git] / src / amd / vulkan / radv_query.c
1 /*
2 * Copyright 2016 Red Hat Inc.
3 * Based on anv:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31
32 #include "nir/nir_builder.h"
33 #include "radv_meta.h"
34 #include "radv_private.h"
35 #include "radv_cs.h"
36 #include "sid.h"
37
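/* Timestamp slots are filled with this value on reset; the GPU overwrites it
 * with the real timestamp, so any other value means the result is available.
 */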
38 #define TIMESTAMP_NOT_READY UINT64_MAX
39
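/* SAMPLE_PIPELINESTAT writes a block of eleven 64-bit counters; each query
 * slot holds two such blocks (begin and end samples).
 * pipeline_statistics_indices[i] is the position within that block of the
 * counter corresponding to Vulkan pipeline-statistic bit i.
 */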
40 static const int pipelinestat_block_size = 11 * 8;
41 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
42
43 static unsigned get_max_db(struct radv_device *device)
44 {
45 unsigned num_db = device->physical_device->rad_info.num_render_backends;
46 MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
47
48 	/* If this does not hold, the query reset procedure needs to be changed. */
49 assert(rb_mask == ((1ull << num_db) - 1));
50
51 return num_db;
52 }
53
54
55 static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
56 {
57 return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
58 }
59
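/* Emit "if (var >= count) break; var = var + 1;" at the current cursor.
 * Used to build the bounded loops in the query copy shaders below.
 */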
60 static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
61 {
62 nir_ssa_def *counter = nir_load_var(b, var);
63
64 nir_if *if_stmt = nir_if_create(b->shader);
65 if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
66 nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
67
68 b->cursor = nir_after_cf_list(&if_stmt->then_list);
69
70 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
71 nir_builder_instr_insert(b, &instr->instr);
72
73 b->cursor = nir_after_cf_node(&if_stmt->cf_node);
74 counter = nir_iadd(b, counter, nir_imm_int(b, 1));
75 nir_store_var(b, var, counter, 0x1);
76 }
77
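/* Load a single 32-bit value from the push constants at the given byte offset. */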
78 static struct nir_ssa_def *
79 radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
80 {
81 nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
82 nir_intrinsic_set_base(flags, 0);
83 nir_intrinsic_set_range(flags, 16);
84 flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
85 flags->num_components = 1;
86 nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
87 nir_builder_instr_insert(b, &flags->instr);
88 return &flags->dest.ssa;
89 }
90
91 static nir_shader *
92 build_occlusion_query_shader(struct radv_device *device) {
93 /* the shader this builds is roughly
94 *
95 * push constants {
96 * uint32_t flags;
97 * uint32_t dst_stride;
98 * };
99 *
100 * uint32_t src_stride = 16 * db_count;
101 *
102 * location(binding = 0) buffer dst_buf;
103 * location(binding = 1) buffer src_buf;
104 *
105 * void main() {
106 * uint64_t result = 0;
107 * uint64_t src_offset = src_stride * global_id.x;
108 * uint64_t dst_offset = dst_stride * global_id.x;
109 * bool available = true;
110 * for (int i = 0; i < db_count; ++i) {
111 * uint64_t start = src_buf[src_offset + 16 * i];
112 * uint64_t end = src_buf[src_offset + 16 * i + 8];
113 * if ((start & (1ull << 63)) && (end & (1ull << 63)))
114 * result += end - start;
115 * else
116 * available = false;
117 * }
118 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
119 * if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
120 * if (flags & VK_QUERY_RESULT_64_BIT)
121 * dst_buf[dst_offset] = result;
122 * else
123 	 * 		dst_buf[dst_offset] = (uint32_t)result;
124 * }
125 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
126 * dst_buf[dst_offset + elem_size] = available;
127 * }
128 * }
129 */
130 nir_builder b;
131 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
132 b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
133 b.shader->info.cs.local_size[0] = 64;
134 b.shader->info.cs.local_size[1] = 1;
135 b.shader->info.cs.local_size[2] = 1;
136
137 nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
138 nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
139 nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
140 nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
141 nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
142 unsigned db_count = get_max_db(device);
143
144 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
145
146 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
147 nir_intrinsic_vulkan_resource_index);
148 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
149 nir_intrinsic_set_desc_set(dst_buf, 0);
150 nir_intrinsic_set_binding(dst_buf, 0);
151 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
152 nir_builder_instr_insert(&b, &dst_buf->instr);
153
154 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
155 nir_intrinsic_vulkan_resource_index);
156 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
157 nir_intrinsic_set_desc_set(src_buf, 0);
158 nir_intrinsic_set_binding(src_buf, 1);
159 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
160 nir_builder_instr_insert(&b, &src_buf->instr);
161
162 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
163 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
164 nir_ssa_def *block_size = nir_imm_ivec4(&b,
165 b.shader->info.cs.local_size[0],
166 b.shader->info.cs.local_size[1],
167 b.shader->info.cs.local_size[2], 0);
168 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
169 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
170
171 nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
172 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
173 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
174 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
175
176
177 nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
178 nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
179 nir_store_var(&b, available, nir_imm_true(&b), 0x1);
180
181 nir_loop *outer_loop = nir_loop_create(b.shader);
182 nir_builder_cf_insert(&b, &outer_loop->cf_node);
183 b.cursor = nir_after_cf_list(&outer_loop->body);
184
185 nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
186 radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
187
188 nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
189 load_offset = nir_iadd(&b, input_base, load_offset);
190
191 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
192 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
193 load->src[1] = nir_src_for_ssa(load_offset);
194 nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
195 load->num_components = 2;
196 nir_builder_instr_insert(&b, &load->instr);
197
198 nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
199 nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);
200
201 nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
202 nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
203
204 nir_if *update_if = nir_if_create(b.shader);
205 update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
206 nir_cf_node_insert(b.cursor, &update_if->cf_node);
207
208 b.cursor = nir_after_cf_list(&update_if->then_list);
209
210 nir_store_var(&b, result,
211 nir_iadd(&b, nir_load_var(&b, result),
212 nir_isub(&b, nir_load_var(&b, end),
213 nir_load_var(&b, start))), 0x1);
214
215 b.cursor = nir_after_cf_list(&update_if->else_list);
216
217 nir_store_var(&b, available, nir_imm_false(&b), 0x1);
218
219 b.cursor = nir_after_cf_node(&outer_loop->cf_node);
220
221 /* Store the result if complete or if partial results have been requested. */
222
223 nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
224 nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
225
226 nir_if *store_if = nir_if_create(b.shader);
227 store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
228 nir_cf_node_insert(b.cursor, &store_if->cf_node);
229
230 b.cursor = nir_after_cf_list(&store_if->then_list);
231
232 nir_if *store_64bit_if = nir_if_create(b.shader);
233 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
234 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
235
236 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
237
238 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
239 store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
240 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
241 store->src[2] = nir_src_for_ssa(output_base);
242 nir_intrinsic_set_write_mask(store, 0x1);
243 store->num_components = 1;
244 nir_builder_instr_insert(&b, &store->instr);
245
246 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
247
248 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
249 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
250 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
251 store->src[2] = nir_src_for_ssa(output_base);
252 nir_intrinsic_set_write_mask(store, 0x1);
253 store->num_components = 1;
254 nir_builder_instr_insert(&b, &store->instr);
255
256 b.cursor = nir_after_cf_node(&store_if->cf_node);
257
258 /* Store the availability bit if requested. */
259
260 nir_if *availability_if = nir_if_create(b.shader);
261 availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
262 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
263
264 b.cursor = nir_after_cf_list(&availability_if->then_list);
265
266 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
267 store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
268 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
269 store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
270 nir_intrinsic_set_write_mask(store, 0x1);
271 store->num_components = 1;
272 nir_builder_instr_insert(&b, &store->instr);
273
274 return b.shader;
275 }
276
277 static nir_shader *
278 build_pipeline_statistics_query_shader(struct radv_device *device) {
279 /* the shader this builds is roughly
280 *
281 * push constants {
282 * uint32_t flags;
283 * uint32_t dst_stride;
284 * uint32_t stats_mask;
285 * uint32_t avail_offset;
286 * };
287 *
288 * uint32_t src_stride = pipelinestat_block_size * 2;
289 *
290 * location(binding = 0) buffer dst_buf;
291 * location(binding = 1) buffer src_buf;
292 *
293 * void main() {
294 * uint64_t src_offset = src_stride * global_id.x;
295 * uint64_t dst_base = dst_stride * global_id.x;
296 * uint64_t dst_offset = dst_base;
297 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
298 * uint32_t elem_count = stats_mask >> 16;
299 * uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
300 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
301 * dst_buf[dst_offset + elem_count * elem_size] = available32;
302 * }
303 * if ((bool)available32) {
304 * // repeat 11 times:
305 * if (stats_mask & (1 << 0)) {
306 * uint64_t start = src_buf[src_offset + 8 * indices[0]];
307 * uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
308 * uint64_t result = end - start;
309 * if (flags & VK_QUERY_RESULT_64_BIT)
310 * dst_buf[dst_offset] = result;
311 * else
312 	 * 			dst_buf[dst_offset] = (uint32_t)result;
313 * dst_offset += elem_size;
314 * }
315 * } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
316 * // Set everything to 0 as we don't know what is valid.
317 * for (int i = 0; i < elem_count; ++i)
318 * dst_buf[dst_base + elem_size * i] = 0;
319 * }
320 * }
321 */
322 nir_builder b;
323 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
324 b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
325 b.shader->info.cs.local_size[0] = 64;
326 b.shader->info.cs.local_size[1] = 1;
327 b.shader->info.cs.local_size[2] = 1;
328
329 nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
330
331 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
332 nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
333 nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");
334
335 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
336 nir_intrinsic_vulkan_resource_index);
337 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
338 nir_intrinsic_set_desc_set(dst_buf, 0);
339 nir_intrinsic_set_binding(dst_buf, 0);
340 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
341 nir_builder_instr_insert(&b, &dst_buf->instr);
342
343 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
344 nir_intrinsic_vulkan_resource_index);
345 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
346 nir_intrinsic_set_desc_set(src_buf, 0);
347 nir_intrinsic_set_binding(src_buf, 1);
348 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
349 nir_builder_instr_insert(&b, &src_buf->instr);
350
351 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
352 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
353 nir_ssa_def *block_size = nir_imm_ivec4(&b,
354 b.shader->info.cs.local_size[0],
355 b.shader->info.cs.local_size[1],
356 b.shader->info.cs.local_size[2], 0);
357 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
358 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
359
360 nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
361 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
362 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
363 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
364
365
366 avail_offset = nir_iadd(&b, avail_offset,
367 nir_imul(&b, global_id, nir_imm_int(&b, 4)));
368
369 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
370 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
371 load->src[1] = nir_src_for_ssa(avail_offset);
372 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
373 load->num_components = 1;
374 nir_builder_instr_insert(&b, &load->instr);
375 nir_ssa_def *available32 = &load->dest.ssa;
376
377 nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
378 nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
379 nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
380
381 /* Store the availability bit if requested. */
382
383 nir_if *availability_if = nir_if_create(b.shader);
384 availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
385 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
386
387 b.cursor = nir_after_cf_list(&availability_if->then_list);
388
389 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
390 store->src[0] = nir_src_for_ssa(available32);
391 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
392 store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
393 nir_intrinsic_set_write_mask(store, 0x1);
394 store->num_components = 1;
395 nir_builder_instr_insert(&b, &store->instr);
396
397 b.cursor = nir_after_cf_node(&availability_if->cf_node);
398
399 nir_if *available_if = nir_if_create(b.shader);
400 available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
401 nir_cf_node_insert(b.cursor, &available_if->cf_node);
402
403 b.cursor = nir_after_cf_list(&available_if->then_list);
404
405 nir_store_var(&b, output_offset, output_base, 0x1);
406 for (int i = 0; i < 11; ++i) {
407 nir_if *store_if = nir_if_create(b.shader);
408 store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
409 nir_cf_node_insert(b.cursor, &store_if->cf_node);
410
411 b.cursor = nir_after_cf_list(&store_if->then_list);
412
413 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
414 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
415 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
416 nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
417 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
418 load->num_components = 1;
419 nir_builder_instr_insert(&b, &load->instr);
420 nir_ssa_def *start = &load->dest.ssa;
421
422 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
423 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
424 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
425 nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
426 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
427 load->num_components = 1;
428 nir_builder_instr_insert(&b, &load->instr);
429 nir_ssa_def *end = &load->dest.ssa;
430
431 nir_ssa_def *result = nir_isub(&b, end, start);
432
433 /* Store result */
434 nir_if *store_64bit_if = nir_if_create(b.shader);
435 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
436 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
437
438 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
439
440 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
441 store->src[0] = nir_src_for_ssa(result);
442 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
443 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
444 nir_intrinsic_set_write_mask(store, 0x1);
445 store->num_components = 1;
446 nir_builder_instr_insert(&b, &store->instr);
447
448 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
449
450 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
451 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
452 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
453 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
454 nir_intrinsic_set_write_mask(store, 0x1);
455 store->num_components = 1;
456 nir_builder_instr_insert(&b, &store->instr);
457
458 b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
459
460 nir_store_var(&b, output_offset,
461 nir_iadd(&b, nir_load_var(&b, output_offset),
462 elem_size), 0x1);
463
464 b.cursor = nir_after_cf_node(&store_if->cf_node);
465 }
466
467 b.cursor = nir_after_cf_list(&available_if->else_list);
468
469 available_if = nir_if_create(b.shader);
470 available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
471 nir_cf_node_insert(b.cursor, &available_if->cf_node);
472
473 b.cursor = nir_after_cf_list(&available_if->then_list);
474
475 /* Stores zeros in all outputs. */
476
477 nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
478 nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);
479
480 nir_loop *loop = nir_loop_create(b.shader);
481 nir_builder_cf_insert(&b, &loop->cf_node);
482 b.cursor = nir_after_cf_list(&loop->body);
483
484 nir_ssa_def *current_counter = nir_load_var(&b, counter);
485 radv_break_on_count(&b, counter, elem_count);
486
487 nir_ssa_def *output_elem = nir_iadd(&b, output_base,
488 nir_imul(&b, elem_size, current_counter));
489
490 nir_if *store_64bit_if = nir_if_create(b.shader);
491 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
492 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
493
494 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
495
496 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
497 store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
498 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
499 store->src[2] = nir_src_for_ssa(output_elem);
500 nir_intrinsic_set_write_mask(store, 0x1);
501 store->num_components = 1;
502 nir_builder_instr_insert(&b, &store->instr);
503
504 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
505
506 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
507 store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
508 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
509 store->src[2] = nir_src_for_ssa(output_elem);
510 nir_intrinsic_set_write_mask(store, 0x1);
511 store->num_components = 1;
512 nir_builder_instr_insert(&b, &store->instr);
513
514 b.cursor = nir_after_cf_node(&loop->cf_node);
515 return b.shader;
516 }
517
518 static nir_shader *
519 build_tfb_query_shader(struct radv_device *device)
520 {
521 /* the shader this builds is roughly
522 *
523 * uint32_t src_stride = 32;
524 *
525 * location(binding = 0) buffer dst_buf;
526 * location(binding = 1) buffer src_buf;
527 *
528 * void main() {
529 * uint64_t result[2] = {};
530 * bool available = false;
531 * uint64_t src_offset = src_stride * global_id.x;
532 * uint64_t dst_offset = dst_stride * global_id.x;
533 * uint64_t *src_data = src_buf[src_offset];
534 * uint32_t avail = (src_data[0] >> 32) &
535 * (src_data[1] >> 32) &
536 * (src_data[2] >> 32) &
537 * (src_data[3] >> 32);
538 * if (avail & 0x80000000) {
539 * result[0] = src_data[3] - src_data[1];
540 * result[1] = src_data[2] - src_data[0];
541 * available = true;
542 * }
543 * uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
544 * if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
545 * if (flags & VK_QUERY_RESULT_64_BIT) {
546 * dst_buf[dst_offset] = result;
547 * } else {
548 * dst_buf[dst_offset] = (uint32_t)result;
549 * }
550 * }
551 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
552 * dst_buf[dst_offset + result_size] = available;
553 * }
554 * }
555 */
556 nir_builder b;
557 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
558 b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
559 b.shader->info.cs.local_size[0] = 64;
560 b.shader->info.cs.local_size[1] = 1;
561 b.shader->info.cs.local_size[2] = 1;
562
563 /* Create and initialize local variables. */
564 nir_variable *result =
565 nir_local_variable_create(b.impl,
566 glsl_vector_type(GLSL_TYPE_UINT64, 2),
567 "result");
568 nir_variable *available =
569 nir_local_variable_create(b.impl, glsl_bool_type(), "available");
570
571 nir_store_var(&b, result,
572 nir_vec2(&b, nir_imm_int64(&b, 0),
573 nir_imm_int64(&b, 0)), 0x3);
574 nir_store_var(&b, available, nir_imm_false(&b), 0x1);
575
576 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
577
578 /* Load resources. */
579 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
580 nir_intrinsic_vulkan_resource_index);
581 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
582 nir_intrinsic_set_desc_set(dst_buf, 0);
583 nir_intrinsic_set_binding(dst_buf, 0);
584 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
585 nir_builder_instr_insert(&b, &dst_buf->instr);
586
587 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
588 nir_intrinsic_vulkan_resource_index);
589 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
590 nir_intrinsic_set_desc_set(src_buf, 0);
591 nir_intrinsic_set_binding(src_buf, 1);
592 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
593 nir_builder_instr_insert(&b, &src_buf->instr);
594
595 /* Compute global ID. */
596 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
597 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
598 nir_ssa_def *block_size = nir_imm_ivec4(&b,
599 b.shader->info.cs.local_size[0],
600 b.shader->info.cs.local_size[1],
601 b.shader->info.cs.local_size[2], 0);
602 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
603 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
604
605 /* Compute src/dst strides. */
606 nir_ssa_def *input_stride = nir_imm_int(&b, 32);
607 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
608 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
609 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
610
611 /* Load data from the query pool. */
612 nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
613 load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
614 load1->src[1] = nir_src_for_ssa(input_base);
615 nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
616 load1->num_components = 4;
617 nir_builder_instr_insert(&b, &load1->instr);
618
619 nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
620 load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
621 load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
622 nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
623 load2->num_components = 4;
624 nir_builder_instr_insert(&b, &load2->instr);
625
626 /* Check if result is available. */
627 nir_ssa_def *avails[2];
628 avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
629 nir_channel(&b, &load1->dest.ssa, 3));
630 avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
631 nir_channel(&b, &load2->dest.ssa, 3));
632 nir_ssa_def *result_is_available =
633 nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
634 nir_imm_int(&b, 0x80000000)));
635
636 /* Only compute result if available. */
637 nir_if *available_if = nir_if_create(b.shader);
638 available_if->condition = nir_src_for_ssa(result_is_available);
639 nir_cf_node_insert(b.cursor, &available_if->cf_node);
640
641 b.cursor = nir_after_cf_list(&available_if->then_list);
642
643 /* Pack values. */
644 nir_ssa_def *packed64[4];
645 packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
646 nir_channel(&b, &load1->dest.ssa, 0),
647 nir_channel(&b, &load1->dest.ssa, 1)));
648 packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
649 nir_channel(&b, &load1->dest.ssa, 2),
650 nir_channel(&b, &load1->dest.ssa, 3)));
651 packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
652 nir_channel(&b, &load2->dest.ssa, 0),
653 nir_channel(&b, &load2->dest.ssa, 1)));
654 packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
655 nir_channel(&b, &load2->dest.ssa, 2),
656 nir_channel(&b, &load2->dest.ssa, 3)));
657
658 /* Compute result. */
659 nir_ssa_def *num_primitive_written =
660 nir_isub(&b, packed64[3], packed64[1]);
661 nir_ssa_def *primitive_storage_needed =
662 nir_isub(&b, packed64[2], packed64[0]);
663
664 nir_store_var(&b, result,
665 nir_vec2(&b, num_primitive_written,
666 primitive_storage_needed), 0x3);
667 nir_store_var(&b, available, nir_imm_true(&b), 0x1);
668
669 b.cursor = nir_after_cf_node(&available_if->cf_node);
670
671 /* Determine if result is 64 or 32 bit. */
672 nir_ssa_def *result_is_64bit =
673 nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
674 nir_ssa_def *result_size =
675 nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
676 nir_imm_int(&b, 8));
677
678 /* Store the result if complete or partial results have been requested. */
679 nir_if *store_if = nir_if_create(b.shader);
680 store_if->condition =
681 nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
682 nir_load_var(&b, available)));
683 nir_cf_node_insert(b.cursor, &store_if->cf_node);
684
685 b.cursor = nir_after_cf_list(&store_if->then_list);
686
687 /* Store result. */
688 nir_if *store_64bit_if = nir_if_create(b.shader);
689 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
690 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
691
692 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
693
694 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
695 store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
696 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
697 store->src[2] = nir_src_for_ssa(output_base);
698 nir_intrinsic_set_write_mask(store, 0x3);
699 store->num_components = 2;
700 nir_builder_instr_insert(&b, &store->instr);
701
702 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
703
704 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
705 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
706 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
707 store->src[2] = nir_src_for_ssa(output_base);
708 nir_intrinsic_set_write_mask(store, 0x3);
709 store->num_components = 2;
710 nir_builder_instr_insert(&b, &store->instr);
711
712 b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
713
714 b.cursor = nir_after_cf_node(&store_if->cf_node);
715
716 /* Store the availability bit if requested. */
717 nir_if *availability_if = nir_if_create(b.shader);
718 availability_if->condition =
719 nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
720 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
721
722 b.cursor = nir_after_cf_list(&availability_if->then_list);
723
724 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
725 store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
726 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
727 store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
728 nir_intrinsic_set_write_mask(store, 0x1);
729 store->num_components = 1;
730 nir_builder_instr_insert(&b, &store->instr);
731
732 b.cursor = nir_after_cf_node(&availability_if->cf_node);
733
734 return b.shader;
735 }
736
737 static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
738 {
739 VkResult result;
740 struct radv_shader_module occlusion_cs = { .nir = NULL };
741 struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
742 struct radv_shader_module tfb_cs = { .nir = NULL };
743
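	/* The query pipelines are created lazily; if another thread already
	 * built them, there is nothing left to do.
	 */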
744 mtx_lock(&device->meta_state.mtx);
745 if (device->meta_state.query.pipeline_statistics_query_pipeline) {
746 mtx_unlock(&device->meta_state.mtx);
747 return VK_SUCCESS;
748 }
749 occlusion_cs.nir = build_occlusion_query_shader(device);
750 pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
751 tfb_cs.nir = build_tfb_query_shader(device);
752
753 VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
754 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
755 .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
756 .bindingCount = 2,
757 .pBindings = (VkDescriptorSetLayoutBinding[]) {
758 {
759 .binding = 0,
760 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
761 .descriptorCount = 1,
762 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
763 .pImmutableSamplers = NULL
764 },
765 {
766 .binding = 1,
767 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
768 .descriptorCount = 1,
769 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
770 .pImmutableSamplers = NULL
771 },
772 }
773 };
774
775 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
776 &occlusion_ds_create_info,
777 &device->meta_state.alloc,
778 &device->meta_state.query.ds_layout);
779 if (result != VK_SUCCESS)
780 goto fail;
781
782 VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
783 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
784 .setLayoutCount = 1,
785 .pSetLayouts = &device->meta_state.query.ds_layout,
786 .pushConstantRangeCount = 1,
787 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
788 };
789
790 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
791 &occlusion_pl_create_info,
792 &device->meta_state.alloc,
793 &device->meta_state.query.p_layout);
794 if (result != VK_SUCCESS)
795 goto fail;
796
797 VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
798 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
799 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
800 .module = radv_shader_module_to_handle(&occlusion_cs),
801 .pName = "main",
802 .pSpecializationInfo = NULL,
803 };
804
805 VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
806 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
807 .stage = occlusion_pipeline_shader_stage,
808 .flags = 0,
809 .layout = device->meta_state.query.p_layout,
810 };
811
812 result = radv_CreateComputePipelines(radv_device_to_handle(device),
813 radv_pipeline_cache_to_handle(&device->meta_state.cache),
814 1, &occlusion_vk_pipeline_info, NULL,
815 &device->meta_state.query.occlusion_query_pipeline);
816 if (result != VK_SUCCESS)
817 goto fail;
818
819 VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
820 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
821 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
822 .module = radv_shader_module_to_handle(&pipeline_statistics_cs),
823 .pName = "main",
824 .pSpecializationInfo = NULL,
825 };
826
827 VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
828 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
829 .stage = pipeline_statistics_pipeline_shader_stage,
830 .flags = 0,
831 .layout = device->meta_state.query.p_layout,
832 };
833
834 result = radv_CreateComputePipelines(radv_device_to_handle(device),
835 radv_pipeline_cache_to_handle(&device->meta_state.cache),
836 1, &pipeline_statistics_vk_pipeline_info, NULL,
837 &device->meta_state.query.pipeline_statistics_query_pipeline);
838 if (result != VK_SUCCESS)
839 goto fail;
840
841 VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
842 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
843 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
844 .module = radv_shader_module_to_handle(&tfb_cs),
845 .pName = "main",
846 .pSpecializationInfo = NULL,
847 };
848
849 VkComputePipelineCreateInfo tfb_pipeline_info = {
850 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
851 .stage = tfb_pipeline_shader_stage,
852 .flags = 0,
853 .layout = device->meta_state.query.p_layout,
854 };
855
856 result = radv_CreateComputePipelines(radv_device_to_handle(device),
857 radv_pipeline_cache_to_handle(&device->meta_state.cache),
858 1, &tfb_pipeline_info, NULL,
859 &device->meta_state.query.tfb_query_pipeline);
860 fail:
861 if (result != VK_SUCCESS)
862 radv_device_finish_meta_query_state(device);
863 ralloc_free(occlusion_cs.nir);
864 ralloc_free(pipeline_statistics_cs.nir);
865 ralloc_free(tfb_cs.nir);
866 mtx_unlock(&device->meta_state.mtx);
867 return result;
868 }
869
870 VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
871 {
872 if (on_demand)
873 return VK_SUCCESS;
874
875 return radv_device_init_meta_query_state_internal(device);
876 }
877
878 void radv_device_finish_meta_query_state(struct radv_device *device)
879 {
880 if (device->meta_state.query.tfb_query_pipeline)
881 radv_DestroyPipeline(radv_device_to_handle(device),
882 device->meta_state.query.tfb_query_pipeline,
883 &device->meta_state.alloc);
884
885 if (device->meta_state.query.pipeline_statistics_query_pipeline)
886 radv_DestroyPipeline(radv_device_to_handle(device),
887 device->meta_state.query.pipeline_statistics_query_pipeline,
888 &device->meta_state.alloc);
889
890 if (device->meta_state.query.occlusion_query_pipeline)
891 radv_DestroyPipeline(radv_device_to_handle(device),
892 device->meta_state.query.occlusion_query_pipeline,
893 &device->meta_state.alloc);
894
895 if (device->meta_state.query.p_layout)
896 radv_DestroyPipelineLayout(radv_device_to_handle(device),
897 device->meta_state.query.p_layout,
898 &device->meta_state.alloc);
899
900 if (device->meta_state.query.ds_layout)
901 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
902 device->meta_state.query.ds_layout,
903 &device->meta_state.alloc);
904 }
905
906 static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
907 VkPipeline *pipeline,
908 struct radeon_winsys_bo *src_bo,
909 struct radeon_winsys_bo *dst_bo,
910 uint64_t src_offset, uint64_t dst_offset,
911 uint32_t src_stride, uint32_t dst_stride,
912 uint32_t count, uint32_t flags,
913 uint32_t pipeline_stats_mask, uint32_t avail_offset)
914 {
915 struct radv_device *device = cmd_buffer->device;
916 struct radv_meta_saved_state saved_state;
917 bool old_predicating;
918
919 if (!*pipeline) {
920 VkResult ret = radv_device_init_meta_query_state_internal(device);
921 if (ret != VK_SUCCESS) {
922 cmd_buffer->record_result = ret;
923 return;
924 }
925 }
926
927 radv_meta_save(&saved_state, cmd_buffer,
928 RADV_META_SAVE_COMPUTE_PIPELINE |
929 RADV_META_SAVE_CONSTANTS |
930 RADV_META_SAVE_DESCRIPTORS);
931
932 /* VK_EXT_conditional_rendering says that copy commands should not be
933 * affected by conditional rendering.
934 */
935 old_predicating = cmd_buffer->state.predicating;
936 cmd_buffer->state.predicating = false;
937
938 struct radv_buffer dst_buffer = {
939 .bo = dst_bo,
940 .offset = dst_offset,
941 .size = dst_stride * count
942 };
943
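	/* The bound source range must also cover the availability words used by
	 * pipeline statistics queries, hence the MAX2 on the size below.
	 */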
944 struct radv_buffer src_buffer = {
945 .bo = src_bo,
946 .offset = src_offset,
947 .size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
948 };
949
950 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
951 VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
952
953 radv_meta_push_descriptor_set(cmd_buffer,
954 VK_PIPELINE_BIND_POINT_COMPUTE,
955 device->meta_state.query.p_layout,
956 0, /* set */
957 2, /* descriptorWriteCount */
958 (VkWriteDescriptorSet[]) {
959 {
960 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
961 .dstBinding = 0,
962 .dstArrayElement = 0,
963 .descriptorCount = 1,
964 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
965 .pBufferInfo = &(VkDescriptorBufferInfo) {
966 .buffer = radv_buffer_to_handle(&dst_buffer),
967 .offset = 0,
968 .range = VK_WHOLE_SIZE
969 }
970 },
971 {
972 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
973 .dstBinding = 1,
974 .dstArrayElement = 0,
975 .descriptorCount = 1,
976 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
977 .pBufferInfo = &(VkDescriptorBufferInfo) {
978 .buffer = radv_buffer_to_handle(&src_buffer),
979 .offset = 0,
980 .range = VK_WHOLE_SIZE
981 }
982 }
983 });
984
985 /* Encode the number of elements for easy access by the shader. */
986 pipeline_stats_mask &= 0x7ff;
987 pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
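	/* e.g. a mask with three statistics selected becomes (3 << 16) | mask;
	 * the shader recovers the element count with stats_mask >> 16.
	 */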
988
989 avail_offset -= src_offset;
990
991 struct {
992 uint32_t flags;
993 uint32_t dst_stride;
994 uint32_t pipeline_stats_mask;
995 uint32_t avail_offset;
996 } push_constants = {
997 flags,
998 dst_stride,
999 pipeline_stats_mask,
1000 avail_offset
1001 };
1002
1003 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
1004 device->meta_state.query.p_layout,
1005 VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
1006 &push_constants);
1007
1008 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
1009 RADV_CMD_FLAG_INV_VMEM_L1;
1010
1011 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1012 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
1013
1014 radv_unaligned_dispatch(cmd_buffer, count, 1, 1);
1015
1016 /* Restore conditional rendering. */
1017 cmd_buffer->state.predicating = old_predicating;
1018
1019 radv_meta_restore(&saved_state, cmd_buffer);
1020 }
1021
1022 VkResult radv_CreateQueryPool(
1023 VkDevice _device,
1024 const VkQueryPoolCreateInfo* pCreateInfo,
1025 const VkAllocationCallbacks* pAllocator,
1026 VkQueryPool* pQueryPool)
1027 {
1028 RADV_FROM_HANDLE(radv_device, device, _device);
1029 struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
1030 sizeof(*pool), 8,
1031 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1032 uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
1033 ? TIMESTAMP_NOT_READY : 0;
1034
1035 if (!pool)
1036 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1037
1038
1039 switch(pCreateInfo->queryType) {
1040 case VK_QUERY_TYPE_OCCLUSION:
1041 pool->stride = 16 * get_max_db(device);
1042 break;
1043 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1044 pool->stride = pipelinestat_block_size * 2;
1045 break;
1046 case VK_QUERY_TYPE_TIMESTAMP:
1047 pool->stride = 8;
1048 break;
1049 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1050 pool->stride = 32;
1051 break;
1052 default:
1053 unreachable("creating unhandled query type");
1054 }
1055
1056 pool->type = pCreateInfo->queryType;
1057 pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
1058 pool->availability_offset = pool->stride * pCreateInfo->queryCount;
1059 pool->size = pool->availability_offset;
1060 if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
1061 pool->size += 4 * pCreateInfo->queryCount;
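	/* Pool layout: queryCount slots of pool->stride bytes, followed (for
	 * pipeline statistics pools only) by one 32-bit availability word per
	 * query.
	 */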
1062
1063 pool->bo = device->ws->buffer_create(device->ws, pool->size,
1064 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
1065 RADV_BO_PRIORITY_QUERY_POOL);
1066
1067 if (!pool->bo) {
1068 vk_free2(&device->alloc, pAllocator, pool);
1069 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1070 }
1071
1072 pool->ptr = device->ws->buffer_map(pool->bo);
1073
1074 if (!pool->ptr) {
1075 device->ws->buffer_destroy(pool->bo);
1076 vk_free2(&device->alloc, pAllocator, pool);
1077 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1078 }
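	/* memset also works for TIMESTAMP_NOT_READY because UINT64_MAX is 0xff
	 * in every byte.
	 */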
1079 memset(pool->ptr, initial_value, pool->size);
1080
1081 *pQueryPool = radv_query_pool_to_handle(pool);
1082 return VK_SUCCESS;
1083 }
1084
1085 void radv_DestroyQueryPool(
1086 VkDevice _device,
1087 VkQueryPool _pool,
1088 const VkAllocationCallbacks* pAllocator)
1089 {
1090 RADV_FROM_HANDLE(radv_device, device, _device);
1091 RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
1092
1093 if (!pool)
1094 return;
1095
1096 device->ws->buffer_destroy(pool->bo);
1097 vk_free2(&device->alloc, pAllocator, pool);
1098 }
1099
1100 VkResult radv_GetQueryPoolResults(
1101 VkDevice _device,
1102 VkQueryPool queryPool,
1103 uint32_t firstQuery,
1104 uint32_t queryCount,
1105 size_t dataSize,
1106 void* pData,
1107 VkDeviceSize stride,
1108 VkQueryResultFlags flags)
1109 {
1110 RADV_FROM_HANDLE(radv_device, device, _device);
1111 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1112 char *data = pData;
1113 VkResult result = VK_SUCCESS;
1114
1115 for(unsigned i = 0; i < queryCount; ++i, data += stride) {
1116 char *dest = data;
1117 unsigned query = firstQuery + i;
1118 char *src = pool->ptr + query * pool->stride;
1119 uint32_t available;
1120
1121 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1122 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1123 while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
1124 ;
1125 available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
1126 }
1127
1128 switch (pool->type) {
1129 case VK_QUERY_TYPE_TIMESTAMP: {
1130 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
1131
1132 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1133 while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
1134 ;
1135 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
1136 }
1137
1138 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
1139 result = VK_NOT_READY;
1140 break;
1141
1142 }
1143
1144 if (flags & VK_QUERY_RESULT_64_BIT) {
1145 *(uint64_t*)dest = *(uint64_t*)src;
1146 dest += 8;
1147 } else {
1148 *(uint32_t*)dest = *(uint32_t*)src;
1149 dest += 4;
1150 }
1151 break;
1152 }
1153 case VK_QUERY_TYPE_OCCLUSION: {
1154 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
1155 uint64_t sample_count = 0;
1156 int db_count = get_max_db(device);
1157 available = 1;
1158
1159 for (int i = 0; i < db_count; ++i) {
1160 uint64_t start, end;
1161 do {
1162 start = src64[2 * i];
1163 end = src64[2 * i + 1];
1164 } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));
1165
1166 if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
1167 available = 0;
1168 else {
1169 sample_count += end - start;
1170 }
1171 }
1172
1173 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
1174 result = VK_NOT_READY;
1175 break;
1176
1177 }
1178
1179 if (flags & VK_QUERY_RESULT_64_BIT) {
1180 *(uint64_t*)dest = sample_count;
1181 dest += 8;
1182 } else {
1183 *(uint32_t*)dest = sample_count;
1184 dest += 4;
1185 }
1186 break;
1187 }
1188 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1189 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
1190 result = VK_NOT_READY;
1191 break;
1192
1193 }
1194
1195 const uint64_t *start = (uint64_t*)src;
1196 const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
1197 if (flags & VK_QUERY_RESULT_64_BIT) {
1198 uint64_t *dst = (uint64_t*)dest;
1199 dest += util_bitcount(pool->pipeline_stats_mask) * 8;
1200 for(int i = 0; i < 11; ++i)
1201 if(pool->pipeline_stats_mask & (1u << i))
1202 *dst++ = stop[pipeline_statistics_indices[i]] -
1203 start[pipeline_statistics_indices[i]];
1204
1205 } else {
1206 uint32_t *dst = (uint32_t*)dest;
1207 dest += util_bitcount(pool->pipeline_stats_mask) * 4;
1208 for(int i = 0; i < 11; ++i)
1209 if(pool->pipeline_stats_mask & (1u << i))
1210 *dst++ = stop[pipeline_statistics_indices[i]] -
1211 start[pipeline_statistics_indices[i]];
1212 }
1213 break;
1214 }
1215 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
1216 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
1217 uint64_t num_primitives_written;
1218 uint64_t primitive_storage_needed;
1219
1220 /* SAMPLE_STREAMOUTSTATS stores this structure:
1221 * {
1222 * u64 NumPrimitivesWritten;
1223 * u64 PrimitiveStorageNeeded;
1224 * }
1225 */
1226 available = 1;
1227 for (int j = 0; j < 4; j++) {
1228 if (!(src64[j] & 0x8000000000000000UL))
1229 available = 0;
1230 }
1231
1232 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
1233 result = VK_NOT_READY;
1234 break;
1235 }
1236
1237 num_primitives_written = src64[3] - src64[1];
1238 primitive_storage_needed = src64[2] - src64[0];
1239
1240 if (flags & VK_QUERY_RESULT_64_BIT) {
1241 *(uint64_t *)dest = num_primitives_written;
1242 dest += 8;
1243 *(uint64_t *)dest = primitive_storage_needed;
1244 dest += 8;
1245 } else {
1246 *(uint32_t *)dest = num_primitives_written;
1247 dest += 4;
1248 *(uint32_t *)dest = primitive_storage_needed;
1249 dest += 4;
1250 }
1251 break;
1252 }
1253 default:
1254 unreachable("trying to get results of unhandled query type");
1255 }
1256
1257 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1258 if (flags & VK_QUERY_RESULT_64_BIT) {
1259 *(uint64_t*)dest = available;
1260 } else {
1261 *(uint32_t*)dest = available;
1262 }
1263 }
1264 }
1265
1266 return result;
1267 }
1268
1269 void radv_CmdCopyQueryPoolResults(
1270 VkCommandBuffer commandBuffer,
1271 VkQueryPool queryPool,
1272 uint32_t firstQuery,
1273 uint32_t queryCount,
1274 VkBuffer dstBuffer,
1275 VkDeviceSize dstOffset,
1276 VkDeviceSize stride,
1277 VkQueryResultFlags flags)
1278 {
1279 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1280 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1281 RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
1282 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1283 unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
1284 uint64_t va = radv_buffer_get_va(pool->bo);
1285 uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
1286 dest_va += dst_buffer->offset + dstOffset;
1287
1288 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
1289 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
1290
1291 switch (pool->type) {
1292 case VK_QUERY_TYPE_OCCLUSION:
1293 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1294 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1295 unsigned query = firstQuery + i;
1296 uint64_t src_va = va + query * pool->stride + pool->stride - 4;
1297
1298 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1299
1300 			/* Wait on the upper word of the last DB entry. */
1301 radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
1302 src_va, 0x80000000, 0xffffffff);
1303 }
1304 }
1305 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
1306 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1307 dst_buffer->offset + dstOffset,
1308 pool->stride, stride,
1309 queryCount, flags, 0, 0);
1310 break;
1311 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1312 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1313 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1314 unsigned query = firstQuery + i;
1315
1316 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1317
1318 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1319
1320 			/* This waits on the ME. All copies below are done on the ME. */
1321 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
1322 avail_va, 1, 0xffffffff);
1323 }
1324 }
1325 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
1326 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1327 dst_buffer->offset + dstOffset,
1328 pool->stride, stride, queryCount, flags,
1329 pool->pipeline_stats_mask,
1330 pool->availability_offset + 4 * firstQuery);
1331 break;
1332 case VK_QUERY_TYPE_TIMESTAMP:
1333 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1334 unsigned query = firstQuery + i;
1335 uint64_t local_src_va = va + query * pool->stride;
1336
1337 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
1338
1339
1340 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1341 /* Wait on the high 32 bits of the timestamp in
1342 * case the low part is 0xffffffff.
1343 */
1344 radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
1345 local_src_va + 4,
1346 TIMESTAMP_NOT_READY >> 32,
1347 0xffffffff);
1348 }
1349 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1350 uint64_t avail_dest_va = dest_va + elem_size;
1351
1352 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1353 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1354 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
1355 radeon_emit(cs, local_src_va);
1356 radeon_emit(cs, local_src_va >> 32);
1357 radeon_emit(cs, avail_dest_va);
1358 radeon_emit(cs, avail_dest_va >> 32);
1359 }
1360
1361 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1362 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1363 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
1364 ((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
1365 radeon_emit(cs, local_src_va);
1366 radeon_emit(cs, local_src_va >> 32);
1367 radeon_emit(cs, dest_va);
1368 radeon_emit(cs, dest_va >> 32);
1369
1370
1371 assert(cs->cdw <= cdw_max);
1372 }
1373 break;
1374 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1375 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1376 for(unsigned i = 0; i < queryCount; i++) {
1377 unsigned query = firstQuery + i;
1378 uint64_t src_va = va + query * pool->stride;
1379
1380 radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);
1381
1382 /* Wait on the upper word of all results. */
1383 for (unsigned j = 0; j < 4; j++, src_va += 8) {
1384 radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
1385 src_va + 4, 0x80000000,
1386 0xffffffff);
1387 }
1388 }
1389 }
1390
1391 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
1392 pool->bo, dst_buffer->bo,
1393 firstQuery * pool->stride,
1394 dst_buffer->offset + dstOffset,
1395 pool->stride, stride,
1396 queryCount, flags, 0, 0);
1397 break;
1398 default:
1399 unreachable("trying to get results of unhandled query type");
1400 }
1401
1402 }
1403
1404 void radv_CmdResetQueryPool(
1405 VkCommandBuffer commandBuffer,
1406 VkQueryPool queryPool,
1407 uint32_t firstQuery,
1408 uint32_t queryCount)
1409 {
1410 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1411 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1412 uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
1413 ? TIMESTAMP_NOT_READY : 0;
1414 uint32_t flush_bits = 0;
1415
1416 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1417 firstQuery * pool->stride,
1418 queryCount * pool->stride, value);
1419
1420 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1421 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1422 pool->availability_offset + firstQuery * 4,
1423 queryCount * 4, 0);
1424 }
1425
1426 if (flush_bits) {
1427 /* Only need to flush caches for the compute shader path. */
1428 cmd_buffer->pending_reset_query = true;
1429 cmd_buffer->state.flush_bits |= flush_bits;
1430 }
1431 }
1432
1433 static unsigned event_type_for_stream(unsigned stream)
1434 {
1435 switch (stream) {
1436 default:
1437 case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
1438 case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
1439 case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
1440 case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
1441 }
1442 }
1443
1444 static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
1445 struct radv_query_pool *pool)
1446 {
1447 if (cmd_buffer->pending_reset_query) {
1448 if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
1449 /* Only need to flush caches if the query pool size is
1450 			 * large enough to be reset using the compute shader
1451 * path. Small pools don't need any cache flushes
1452 * because we use a CP dma clear.
1453 */
1454 si_emit_cache_flush(cmd_buffer);
1455 }
1456 }
1457 }
1458
1459 static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
1460 uint64_t va,
1461 VkQueryType query_type,
1462 VkQueryControlFlags flags,
1463 uint32_t index)
1464 {
1465 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1466 switch (query_type) {
1467 case VK_QUERY_TYPE_OCCLUSION:
1468 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1469
1470 ++cmd_buffer->state.active_occlusion_queries;
1471 if (cmd_buffer->state.active_occlusion_queries == 1) {
1472 if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
1473 			/* This is the first occlusion query, so enable
1474 * the hint if the precision bit is set.
1475 */
1476 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1477 }
1478
1479 radv_set_db_count_control(cmd_buffer);
1480 } else {
1481 if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
1482 !cmd_buffer->state.perfect_occlusion_queries_enabled) {
1483 /* This is not the first query, but this one
1484 			 * needs to enable precision, so DB_COUNT_CONTROL
1485 * has to be updated accordingly.
1486 */
1487 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1488
1489 radv_set_db_count_control(cmd_buffer);
1490 }
1491 }
1492
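		/* ZPASS_DONE makes each enabled render backend write its 64-bit
		 * occlusion counter; begin samples land at offset 0 and end
		 * samples at offset 8 of each 16-byte per-DB pair, matching the
		 * layout read by the occlusion copy shader above.
		 */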
1493 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1494 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1495 radeon_emit(cs, va);
1496 radeon_emit(cs, va >> 32);
1497 break;
1498 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1499 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1500
1501 ++cmd_buffer->state.active_pipeline_queries;
1502 if (cmd_buffer->state.active_pipeline_queries == 1) {
1503 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1504 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
1505 }
1506
1507 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1508 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1509 radeon_emit(cs, va);
1510 radeon_emit(cs, va >> 32);
1511 break;
1512 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1513 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1514
1515 assert(index < MAX_SO_STREAMS);
1516
1517 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1518 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1519 radeon_emit(cs, va);
1520 radeon_emit(cs, va >> 32);
1521 break;
1522 default:
1523 unreachable("beginning unhandled query type");
1524 }
1525
1526 }
1527
1528 static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
1529 uint64_t va, uint64_t avail_va,
1530 VkQueryType query_type, uint32_t index)
1531 {
1532 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1533 switch (query_type) {
1534 case VK_QUERY_TYPE_OCCLUSION:
1535 radeon_check_space(cmd_buffer->device->ws, cs, 14);
1536
1537 cmd_buffer->state.active_occlusion_queries--;
1538 if (cmd_buffer->state.active_occlusion_queries == 0) {
1539 radv_set_db_count_control(cmd_buffer);
1540
1541 /* Reset the perfect occlusion queries hint now that no
1542 * queries are active.
1543 */
1544 cmd_buffer->state.perfect_occlusion_queries_enabled = false;
1545 }
1546
1547 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1548 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1549 radeon_emit(cs, va + 8);
1550 radeon_emit(cs, (va + 8) >> 32);
1551
1552 break;
1553 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1554 radeon_check_space(cmd_buffer->device->ws, cs, 16);
1555
1556 cmd_buffer->state.active_pipeline_queries--;
1557 if (cmd_buffer->state.active_pipeline_queries == 0) {
1558 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
1559 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1560 }
1561 va += pipelinestat_block_size;
1562
1563 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1564 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1565 radeon_emit(cs, va);
1566 radeon_emit(cs, va >> 32);
1567
1568 si_cs_emit_write_event_eop(cs,
1569 cmd_buffer->device->physical_device->rad_info.chip_class,
1570 radv_cmd_buffer_uses_mec(cmd_buffer),
1571 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1572 EOP_DATA_SEL_VALUE_32BIT,
1573 avail_va, 1,
1574 cmd_buffer->gfx9_eop_bug_va);
1575 break;
1576 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1577 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1578
1579 assert(index < MAX_SO_STREAMS);
1580
1581 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1582 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1583 radeon_emit(cs, (va + 16));
1584 radeon_emit(cs, (va + 16) >> 32);
1585 break;
1586 default:
1587 unreachable("ending unhandled query type");
1588 }
1589 }
1590
1591 void radv_CmdBeginQueryIndexedEXT(
1592 VkCommandBuffer commandBuffer,
1593 VkQueryPool queryPool,
1594 uint32_t query,
1595 VkQueryControlFlags flags,
1596 uint32_t index)
1597 {
1598 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1599 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1600 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1601 uint64_t va = radv_buffer_get_va(pool->bo);
1602
1603 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1604
1605 emit_query_flush(cmd_buffer, pool);
1606
1607 va += pool->stride * query;
1608
1609 emit_begin_query(cmd_buffer, va, pool->type, flags, index);
1610 }
1611
1612 void radv_CmdBeginQuery(
1613 VkCommandBuffer commandBuffer,
1614 VkQueryPool queryPool,
1615 uint32_t query,
1616 VkQueryControlFlags flags)
1617 {
1618 radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
1619 }
1620
1621 void radv_CmdEndQueryIndexedEXT(
1622 VkCommandBuffer commandBuffer,
1623 VkQueryPool queryPool,
1624 uint32_t query,
1625 uint32_t index)
1626 {
1627 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1628 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1629 uint64_t va = radv_buffer_get_va(pool->bo);
1630 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1631 va += pool->stride * query;
1632
1633 	/* We do not need to add the pool BO to the list because the query must
1634 * currently be active, which means the BO is already in the list.
1635 */
1636 emit_end_query(cmd_buffer, va, avail_va, pool->type, index);
1637
1638 /*
1639 * For multiview we have to emit a query for each bit in the mask,
1640 * however the first query we emit will get the totals for all the
1641 * operations, so we don't want to get a real value in the other
1642 * queries. This emits a fake begin/end sequence so the waiting
1643 * code gets a completed query value and doesn't hang, but the
1644 * query returns 0.
1645 */
1646 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
1647 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1648
1649
1650 for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
1651 va += pool->stride;
1652 avail_va += 4;
1653 emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
1654 emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
1655 }
1656 }
1657 }
1658
1659 void radv_CmdEndQuery(
1660 VkCommandBuffer commandBuffer,
1661 VkQueryPool queryPool,
1662 uint32_t query)
1663 {
1664 radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
1665 }
1666
1667 void radv_CmdWriteTimestamp(
1668 VkCommandBuffer commandBuffer,
1669 VkPipelineStageFlagBits pipelineStage,
1670 VkQueryPool queryPool,
1671 uint32_t query)
1672 {
1673 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1674 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1675 bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
1676 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1677 uint64_t va = radv_buffer_get_va(pool->bo);
1678 uint64_t query_va = va + pool->stride * query;
1679
1680 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1681
1682 emit_query_flush(cmd_buffer, pool);
1683
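	/* With multiview enabled, write one timestamp per view; each view uses
	 * the next consecutive query slot.
	 */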
1684 int num_queries = 1;
1685 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
1686 num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
1687
1688 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
1689
1690 for (unsigned i = 0; i < num_queries; i++) {
1691 switch(pipelineStage) {
1692 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1693 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1694 radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
1695 COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
1696 COPY_DATA_DST_SEL(V_370_MEM));
1697 radeon_emit(cs, 0);
1698 radeon_emit(cs, 0);
1699 radeon_emit(cs, query_va);
1700 radeon_emit(cs, query_va >> 32);
1701 break;
1702 default:
1703 si_cs_emit_write_event_eop(cs,
1704 cmd_buffer->device->physical_device->rad_info.chip_class,
1705 mec,
1706 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1707 EOP_DATA_SEL_TIMESTAMP,
1708 query_va, 0,
1709 cmd_buffer->gfx9_eop_bug_va);
1710 break;
1711 }
1712 query_va += pool->stride;
1713 }
1714 assert(cmd_buffer->cs->cdw <= cdw_max);
1715 }