src/amd/vulkan/radv_query.c (mesa.git)
1 /*
2 * Copyright © 2016 Red Hat Inc.
3 * Based on anv:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31
32 #include "nir/nir_builder.h"
33 #include "radv_meta.h"
34 #include "radv_private.h"
35 #include "radv_cs.h"
36 #include "sid.h"
37
38 #define TIMESTAMP_NOT_READY UINT64_MAX
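/* Timestamp slots are initialized and reset to this value so that a slot
 * which has not been written yet can be told apart from any real GPU
 * timestamp; radv_GetQueryPoolResults and the copy path below key their
 * availability checks off it. */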
39
40 static const int pipelinestat_block_size = 11 * 8;
41 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
42
43 static unsigned get_max_db(struct radv_device *device)
44 {
45 unsigned num_db = device->physical_device->rad_info.num_render_backends;
46 MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
47
48 /* Otherwise we need to change the query reset procedure */
49 assert(rb_mask == ((1ull << num_db) - 1));
50
51 return num_db;
52 }
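/* Occlusion query slots hold one {begin, end} pair of 64-bit sample counts
 * per render backend (16 bytes per DB), so both the CPU result path and the
 * resolve shader below need the DB count to derive the source stride. */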
53
54
55 static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
56 {
57 return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
58 }
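/* Returns a boolean SSA value that is true when (flags & flag) != 0; used
 * below to test VK_QUERY_RESULT_* bits inside the resolve shaders. */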
59
60 static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
61 {
62 nir_ssa_def *counter = nir_load_var(b, var);
63
64 nir_if *if_stmt = nir_if_create(b->shader);
65 if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
66 nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
67
68 b->cursor = nir_after_cf_list(&if_stmt->then_list);
69
70 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
71 nir_builder_instr_insert(b, &instr->instr);
72
73 b->cursor = nir_after_cf_node(&if_stmt->cf_node);
74 counter = nir_iadd(b, counter, nir_imm_int(b, 1));
75 nir_store_var(b, var, counter, 0x1);
76 }
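/* Emits the NIR equivalent of
 *
 *    if (*var >= count)
 *        break;
 *    *var = *var + 1;
 *
 * which the resolve loops below use as their exit condition. */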
77
78 static struct nir_ssa_def *
79 radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
80 {
81 nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
82 nir_intrinsic_set_base(flags, 0);
83 nir_intrinsic_set_range(flags, 16);
84 flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
85 flags->num_components = 1;
86 nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
87 nir_builder_instr_insert(b, &flags->instr);
88 return &flags->dest.ssa;
89 }
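/* Loads one dword of push constants at the given byte offset. The resolve
 * shaders read up to four dwords, laid out as
 *
 *    struct {
 *        uint32_t flags;        // offset 0
 *        uint32_t dst_stride;   // offset 4
 *        uint32_t stats_mask;   // offset 8, pipeline statistics only
 *        uint32_t avail_offset; // offset 12, pipeline statistics only
 *    };
 *
 * which matches the 16-byte push-constant range declared for the query
 * pipeline layout. */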
90
91 static nir_shader *
92 build_occlusion_query_shader(struct radv_device *device) {
93 /* the shader this builds is roughly
94 *
95 * push constants {
96 * uint32_t flags;
97 * uint32_t dst_stride;
98 * };
99 *
100 * uint32_t src_stride = 16 * db_count;
101 *
102 * location(binding = 0) buffer dst_buf;
103 * location(binding = 1) buffer src_buf;
104 *
105 * void main() {
106 * uint64_t result = 0;
107 * uint64_t src_offset = src_stride * global_id.x;
108 * uint64_t dst_offset = dst_stride * global_id.x;
109 * bool available = true;
110 * for (int i = 0; i < db_count; ++i) {
111 * uint64_t start = src_buf[src_offset + 16 * i];
112 * uint64_t end = src_buf[src_offset + 16 * i + 8];
113 * if ((start & (1ull << 63)) && (end & (1ull << 63)))
114 * result += end - start;
115 * else
116 * available = false;
117 * }
118 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
119 * if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
120 * if (flags & VK_QUERY_RESULT_64_BIT)
121 * dst_buf[dst_offset] = result;
122 * else
123 * dst_buf[dst_offset] = (uint32_t)result;
124 * }
125 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
126 * dst_buf[dst_offset + elem_size] = available;
127 * }
128 * }
129 */
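/* The source data is what ZPASS_DONE produces: db_count {begin, end} pairs
 * of 64-bit sample counts, with bit 63 of each value acting as the
 * "value has been written" flag. The CPU path in radv_GetQueryPoolResults
 * makes the same assumption. */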
130 nir_builder b;
131 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
132 b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
133 b.shader->info.cs.local_size[0] = 64;
134 b.shader->info.cs.local_size[1] = 1;
135 b.shader->info.cs.local_size[2] = 1;
136
137 nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
138 nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
139 nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
140 nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
141 nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
142 unsigned db_count = get_max_db(device);
143
144 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
145
146 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
147 nir_intrinsic_vulkan_resource_index);
148 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
149 dst_buf->num_components = 1;
150 nir_intrinsic_set_desc_set(dst_buf, 0);
151 nir_intrinsic_set_binding(dst_buf, 0);
152 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
153 nir_builder_instr_insert(&b, &dst_buf->instr);
154
155 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
156 nir_intrinsic_vulkan_resource_index);
157 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
158 src_buf->num_components = 1;
159 nir_intrinsic_set_desc_set(src_buf, 0);
160 nir_intrinsic_set_binding(src_buf, 1);
161 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
162 nir_builder_instr_insert(&b, &src_buf->instr);
163
164 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
165 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
166 nir_ssa_def *block_size = nir_imm_ivec4(&b,
167 b.shader->info.cs.local_size[0],
168 b.shader->info.cs.local_size[1],
169 b.shader->info.cs.local_size[2], 0);
170 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
171 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
172
173 nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
174 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
175 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
176 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
177
178
179 nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
180 nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
181 nir_store_var(&b, available, nir_imm_true(&b), 0x1);
182
183 nir_loop *outer_loop = nir_loop_create(b.shader);
184 nir_builder_cf_insert(&b, &outer_loop->cf_node);
185 b.cursor = nir_after_cf_list(&outer_loop->body);
186
187 nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
188 radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
189
190 nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
191 load_offset = nir_iadd(&b, input_base, load_offset);
192
193 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
194 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
195 load->src[1] = nir_src_for_ssa(load_offset);
196 nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
197 load->num_components = 2;
198 nir_builder_instr_insert(&b, &load->instr);
199
200 nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
201 nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);
202
203 nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
204 nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
205
206 nir_if *update_if = nir_if_create(b.shader);
207 update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
208 nir_cf_node_insert(b.cursor, &update_if->cf_node);
209
210 b.cursor = nir_after_cf_list(&update_if->then_list);
211
212 nir_store_var(&b, result,
213 nir_iadd(&b, nir_load_var(&b, result),
214 nir_isub(&b, nir_load_var(&b, end),
215 nir_load_var(&b, start))), 0x1);
216
217 b.cursor = nir_after_cf_list(&update_if->else_list);
218
219 nir_store_var(&b, available, nir_imm_false(&b), 0x1);
220
221 b.cursor = nir_after_cf_node(&outer_loop->cf_node);
222
223 /* Store the result if complete or if partial results have been requested. */
224
225 nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
226 nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
227
228 nir_if *store_if = nir_if_create(b.shader);
229 store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
230 nir_cf_node_insert(b.cursor, &store_if->cf_node);
231
232 b.cursor = nir_after_cf_list(&store_if->then_list);
233
234 nir_if *store_64bit_if = nir_if_create(b.shader);
235 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
236 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
237
238 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
239
240 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
241 store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
242 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
243 store->src[2] = nir_src_for_ssa(output_base);
244 nir_intrinsic_set_write_mask(store, 0x1);
245 store->num_components = 1;
246 nir_builder_instr_insert(&b, &store->instr);
247
248 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
249
250 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
251 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
252 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
253 store->src[2] = nir_src_for_ssa(output_base);
254 nir_intrinsic_set_write_mask(store, 0x1);
255 store->num_components = 1;
256 nir_builder_instr_insert(&b, &store->instr);
257
258 b.cursor = nir_after_cf_node(&store_if->cf_node);
259
260 /* Store the availability bit if requested. */
261
262 nir_if *availability_if = nir_if_create(b.shader);
263 availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
264 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
265
266 b.cursor = nir_after_cf_list(&availability_if->then_list);
267
268 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
269 store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
270 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
271 store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
272 nir_intrinsic_set_write_mask(store, 0x1);
273 store->num_components = 1;
274 nir_builder_instr_insert(&b, &store->instr);
275
276 return b.shader;
277 }
278
279 static nir_shader *
280 build_pipeline_statistics_query_shader(struct radv_device *device) {
281 /* the shader this builds is roughly
282 *
283 * push constants {
284 * uint32_t flags;
285 * uint32_t dst_stride;
286 * uint32_t stats_mask;
287 * uint32_t avail_offset;
288 * };
289 *
290 * uint32_t src_stride = pipelinestat_block_size * 2;
291 *
292 * location(binding = 0) buffer dst_buf;
293 * location(binding = 1) buffer src_buf;
294 *
295 * void main() {
296 * uint64_t src_offset = src_stride * global_id.x;
297 * uint64_t dst_base = dst_stride * global_id.x;
298 * uint64_t dst_offset = dst_base;
299 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
300 * uint32_t elem_count = stats_mask >> 16;
301 * uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
302 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
303 * dst_buf[dst_offset + elem_count * elem_size] = available32;
304 * }
305 * if ((bool)available32) {
306 * // repeat 11 times:
307 * if (stats_mask & (1 << 0)) {
308 * uint64_t start = src_buf[src_offset + 8 * indices[0]];
309 * uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
310 * uint64_t result = end - start;
311 * if (flags & VK_QUERY_RESULT_64_BIT)
312 * dst_buf[dst_offset] = result;
313 * else
314 * dst_buf[dst_offset] = (uint32_t)result;
315 * dst_offset += elem_size;
316 * }
317 * } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
318 * // Set everything to 0 as we don't know what is valid.
319 * for (int i = 0; i < elem_count; ++i)
320 * dst_buf[dst_base + elem_size * i] = 0;
321 * }
322 * }
323 */
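/* The source data consists of two SAMPLE_PIPELINESTAT blocks, the begin and
 * end snapshots (pipelinestat_block_size bytes apart), plus one 32-bit
 * availability word per query at avail_offset that emit_end_query sets to 1
 * once the end snapshot has landed. */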
324 nir_builder b;
325 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
326 b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
327 b.shader->info.cs.local_size[0] = 64;
328 b.shader->info.cs.local_size[1] = 1;
329 b.shader->info.cs.local_size[2] = 1;
330
331 nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
332
333 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
334 nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
335 nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");
336
337 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
338 nir_intrinsic_vulkan_resource_index);
339 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
340 dst_buf->num_components = 1;
341 nir_intrinsic_set_desc_set(dst_buf, 0);
342 nir_intrinsic_set_binding(dst_buf, 0);
343 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
344 nir_builder_instr_insert(&b, &dst_buf->instr);
345
346 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
347 nir_intrinsic_vulkan_resource_index);
348 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
349 src_buf->num_components = 1;
350 nir_intrinsic_set_desc_set(src_buf, 0);
351 nir_intrinsic_set_binding(src_buf, 1);
352 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
353 nir_builder_instr_insert(&b, &src_buf->instr);
354
355 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
356 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
357 nir_ssa_def *block_size = nir_imm_ivec4(&b,
358 b.shader->info.cs.local_size[0],
359 b.shader->info.cs.local_size[1],
360 b.shader->info.cs.local_size[2], 0);
361 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
362 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
363
364 nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
365 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
366 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
367 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
368
369
370 avail_offset = nir_iadd(&b, avail_offset,
371 nir_imul(&b, global_id, nir_imm_int(&b, 4)));
372
373 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
374 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
375 load->src[1] = nir_src_for_ssa(avail_offset);
376 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
377 load->num_components = 1;
378 nir_builder_instr_insert(&b, &load->instr);
379 nir_ssa_def *available32 = &load->dest.ssa;
380
381 nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
382 nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
383 nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
384
385 /* Store the availability bit if requested. */
386
387 nir_if *availability_if = nir_if_create(b.shader);
388 availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
389 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
390
391 b.cursor = nir_after_cf_list(&availability_if->then_list);
392
393 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
394 store->src[0] = nir_src_for_ssa(available32);
395 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
396 store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
397 nir_intrinsic_set_write_mask(store, 0x1);
398 store->num_components = 1;
399 nir_builder_instr_insert(&b, &store->instr);
400
401 b.cursor = nir_after_cf_node(&availability_if->cf_node);
402
403 nir_if *available_if = nir_if_create(b.shader);
404 available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
405 nir_cf_node_insert(b.cursor, &available_if->cf_node);
406
407 b.cursor = nir_after_cf_list(&available_if->then_list);
408
409 nir_store_var(&b, output_offset, output_base, 0x1);
410 for (int i = 0; i < 11; ++i) {
411 nir_if *store_if = nir_if_create(b.shader);
412 store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
413 nir_cf_node_insert(b.cursor, &store_if->cf_node);
414
415 b.cursor = nir_after_cf_list(&store_if->then_list);
416
417 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
418 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
419 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
420 nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
421 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
422 load->num_components = 1;
423 nir_builder_instr_insert(&b, &load->instr);
424 nir_ssa_def *start = &load->dest.ssa;
425
426 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
427 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
428 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
429 nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
430 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
431 load->num_components = 1;
432 nir_builder_instr_insert(&b, &load->instr);
433 nir_ssa_def *end = &load->dest.ssa;
434
435 nir_ssa_def *result = nir_isub(&b, end, start);
436
437 /* Store result */
438 nir_if *store_64bit_if = nir_if_create(b.shader);
439 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
440 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
441
442 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
443
444 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
445 store->src[0] = nir_src_for_ssa(result);
446 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
447 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
448 nir_intrinsic_set_write_mask(store, 0x1);
449 store->num_components = 1;
450 nir_builder_instr_insert(&b, &store->instr);
451
452 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
453
454 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
455 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
456 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
457 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
458 nir_intrinsic_set_write_mask(store, 0x1);
459 store->num_components = 1;
460 nir_builder_instr_insert(&b, &store->instr);
461
462 b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
463
464 nir_store_var(&b, output_offset,
465 nir_iadd(&b, nir_load_var(&b, output_offset),
466 elem_size), 0x1);
467
468 b.cursor = nir_after_cf_node(&store_if->cf_node);
469 }
470
471 b.cursor = nir_after_cf_list(&available_if->else_list);
472
473 available_if = nir_if_create(b.shader);
474 available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
475 nir_cf_node_insert(b.cursor, &available_if->cf_node);
476
477 b.cursor = nir_after_cf_list(&available_if->then_list);
478
479 /* Store zeros in all outputs. */
480
481 nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
482 nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);
483
484 nir_loop *loop = nir_loop_create(b.shader);
485 nir_builder_cf_insert(&b, &loop->cf_node);
486 b.cursor = nir_after_cf_list(&loop->body);
487
488 nir_ssa_def *current_counter = nir_load_var(&b, counter);
489 radv_break_on_count(&b, counter, elem_count);
490
491 nir_ssa_def *output_elem = nir_iadd(&b, output_base,
492 nir_imul(&b, elem_size, current_counter));
493
494 nir_if *store_64bit_if = nir_if_create(b.shader);
495 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
496 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
497
498 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
499
500 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
501 store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
502 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
503 store->src[2] = nir_src_for_ssa(output_elem);
504 nir_intrinsic_set_write_mask(store, 0x1);
505 store->num_components = 1;
506 nir_builder_instr_insert(&b, &store->instr);
507
508 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
509
510 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
511 store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
512 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
513 store->src[2] = nir_src_for_ssa(output_elem);
514 nir_intrinsic_set_write_mask(store, 0x1);
515 store->num_components = 1;
516 nir_builder_instr_insert(&b, &store->instr);
517
518 b.cursor = nir_after_cf_node(&loop->cf_node);
519 return b.shader;
520 }
521
522 static nir_shader *
523 build_tfb_query_shader(struct radv_device *device)
524 {
525 /* the shader this builds is roughly
526 *
527 * uint32_t src_stride = 32;
528 *
529 * location(binding = 0) buffer dst_buf;
530 * location(binding = 1) buffer src_buf;
531 *
532 * void main() {
533 * uint64_t result[2] = {};
534 * bool available = false;
535 * uint64_t src_offset = src_stride * global_id.x;
536 * uint64_t dst_offset = dst_stride * global_id.x;
537 * uint64_t *src_data = src_buf[src_offset];
538 * uint32_t avail = (src_data[0] >> 32) &
539 * (src_data[1] >> 32) &
540 * (src_data[2] >> 32) &
541 * (src_data[3] >> 32);
542 * if (avail & 0x80000000) {
543 * result[0] = src_data[3] - src_data[1];
544 * result[1] = src_data[2] - src_data[0];
545 * available = true;
546 * }
547 * uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
548 * if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
549 * if (flags & VK_QUERY_RESULT_64_BIT) {
550 * dst_buf[dst_offset] = result;
551 * } else {
552 * dst_buf[dst_offset] = (uint32_t)result;
553 * }
554 * }
555 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
556 * dst_buf[dst_offset + result_size] = available;
557 * }
558 * }
559 */
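/* The source data is a pair of SAMPLE_STREAMOUTSTATS results, the begin and
 * end snapshots, each holding {NumPrimitivesWritten, PrimitiveStorageNeeded}
 * as 64-bit values; bit 63 of every value serves as the "value has been
 * written" flag, matching the CPU path in radv_GetQueryPoolResults. */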
560 nir_builder b;
561 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
562 b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
563 b.shader->info.cs.local_size[0] = 64;
564 b.shader->info.cs.local_size[1] = 1;
565 b.shader->info.cs.local_size[2] = 1;
566
567 /* Create and initialize local variables. */
568 nir_variable *result =
569 nir_local_variable_create(b.impl,
570 glsl_vector_type(GLSL_TYPE_UINT64, 2),
571 "result");
572 nir_variable *available =
573 nir_local_variable_create(b.impl, glsl_bool_type(), "available");
574
575 nir_store_var(&b, result,
576 nir_vec2(&b, nir_imm_int64(&b, 0),
577 nir_imm_int64(&b, 0)), 0x3);
578 nir_store_var(&b, available, nir_imm_false(&b), 0x1);
579
580 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
581
582 /* Load resources. */
583 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
584 nir_intrinsic_vulkan_resource_index);
585 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
586 dst_buf->num_components = 1;
587 nir_intrinsic_set_desc_set(dst_buf, 0);
588 nir_intrinsic_set_binding(dst_buf, 0);
589 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
590 nir_builder_instr_insert(&b, &dst_buf->instr);
591
592 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
593 nir_intrinsic_vulkan_resource_index);
594 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
595 src_buf->num_components = 1;
596 nir_intrinsic_set_desc_set(src_buf, 0);
597 nir_intrinsic_set_binding(src_buf, 1);
598 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
599 nir_builder_instr_insert(&b, &src_buf->instr);
600
601 /* Compute global ID. */
602 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
603 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
604 nir_ssa_def *block_size = nir_imm_ivec4(&b,
605 b.shader->info.cs.local_size[0],
606 b.shader->info.cs.local_size[1],
607 b.shader->info.cs.local_size[2], 0);
608 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
609 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
610
611 /* Compute src/dst strides. */
612 nir_ssa_def *input_stride = nir_imm_int(&b, 32);
613 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
614 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
615 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
616
617 /* Load data from the query pool. */
618 nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
619 load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
620 load1->src[1] = nir_src_for_ssa(input_base);
621 nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
622 load1->num_components = 4;
623 nir_builder_instr_insert(&b, &load1->instr);
624
625 nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
626 load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
627 load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
628 nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
629 load2->num_components = 4;
630 nir_builder_instr_insert(&b, &load2->instr);
631
632 /* Check if result is available. */
633 nir_ssa_def *avails[2];
634 avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
635 nir_channel(&b, &load1->dest.ssa, 3));
636 avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
637 nir_channel(&b, &load2->dest.ssa, 3));
638 nir_ssa_def *result_is_available =
639 nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
640 nir_imm_int(&b, 0x80000000)));
641
642 /* Only compute result if available. */
643 nir_if *available_if = nir_if_create(b.shader);
644 available_if->condition = nir_src_for_ssa(result_is_available);
645 nir_cf_node_insert(b.cursor, &available_if->cf_node);
646
647 b.cursor = nir_after_cf_list(&available_if->then_list);
648
649 /* Pack values. */
650 nir_ssa_def *packed64[4];
651 packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
652 nir_channel(&b, &load1->dest.ssa, 0),
653 nir_channel(&b, &load1->dest.ssa, 1)));
654 packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
655 nir_channel(&b, &load1->dest.ssa, 2),
656 nir_channel(&b, &load1->dest.ssa, 3)));
657 packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
658 nir_channel(&b, &load2->dest.ssa, 0),
659 nir_channel(&b, &load2->dest.ssa, 1)));
660 packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
661 nir_channel(&b, &load2->dest.ssa, 2),
662 nir_channel(&b, &load2->dest.ssa, 3)));
663
664 /* Compute result. */
665 nir_ssa_def *num_primitive_written =
666 nir_isub(&b, packed64[3], packed64[1]);
667 nir_ssa_def *primitive_storage_needed =
668 nir_isub(&b, packed64[2], packed64[0]);
669
670 nir_store_var(&b, result,
671 nir_vec2(&b, num_primitive_written,
672 primitive_storage_needed), 0x3);
673 nir_store_var(&b, available, nir_imm_true(&b), 0x1);
674
675 b.cursor = nir_after_cf_node(&available_if->cf_node);
676
677 /* Determine if result is 64 or 32 bit. */
678 nir_ssa_def *result_is_64bit =
679 nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
680 nir_ssa_def *result_size =
681 nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
682 nir_imm_int(&b, 8));
683
684 /* Store the result if complete or partial results have been requested. */
685 nir_if *store_if = nir_if_create(b.shader);
686 store_if->condition =
687 nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
688 nir_load_var(&b, available)));
689 nir_cf_node_insert(b.cursor, &store_if->cf_node);
690
691 b.cursor = nir_after_cf_list(&store_if->then_list);
692
693 /* Store result. */
694 nir_if *store_64bit_if = nir_if_create(b.shader);
695 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
696 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
697
698 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
699
700 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
701 store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
702 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
703 store->src[2] = nir_src_for_ssa(output_base);
704 nir_intrinsic_set_write_mask(store, 0x3);
705 store->num_components = 2;
706 nir_builder_instr_insert(&b, &store->instr);
707
708 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
709
710 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
711 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
712 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
713 store->src[2] = nir_src_for_ssa(output_base);
714 nir_intrinsic_set_write_mask(store, 0x3);
715 store->num_components = 2;
716 nir_builder_instr_insert(&b, &store->instr);
717
718 b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
719
720 b.cursor = nir_after_cf_node(&store_if->cf_node);
721
722 /* Store the availability bit if requested. */
723 nir_if *availability_if = nir_if_create(b.shader);
724 availability_if->condition =
725 nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
726 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
727
728 b.cursor = nir_after_cf_list(&availability_if->then_list);
729
730 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
731 store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
732 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
733 store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
734 nir_intrinsic_set_write_mask(store, 0x1);
735 store->num_components = 1;
736 nir_builder_instr_insert(&b, &store->instr);
737
738 b.cursor = nir_after_cf_node(&availability_if->cf_node);
739
740 return b.shader;
741 }
742
743 static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
744 {
745 VkResult result;
746 struct radv_shader_module occlusion_cs = { .nir = NULL };
747 struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
748 struct radv_shader_module tfb_cs = { .nir = NULL };
749
750 mtx_lock(&device->meta_state.mtx);
751 if (device->meta_state.query.pipeline_statistics_query_pipeline) {
752 mtx_unlock(&device->meta_state.mtx);
753 return VK_SUCCESS;
754 }
755 occlusion_cs.nir = build_occlusion_query_shader(device);
756 pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
757 tfb_cs.nir = build_tfb_query_shader(device);
758
759 VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
760 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
761 .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
762 .bindingCount = 2,
763 .pBindings = (VkDescriptorSetLayoutBinding[]) {
764 {
765 .binding = 0,
766 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
767 .descriptorCount = 1,
768 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
769 .pImmutableSamplers = NULL
770 },
771 {
772 .binding = 1,
773 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
774 .descriptorCount = 1,
775 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
776 .pImmutableSamplers = NULL
777 },
778 }
779 };
780
781 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
782 &occlusion_ds_create_info,
783 &device->meta_state.alloc,
784 &device->meta_state.query.ds_layout);
785 if (result != VK_SUCCESS)
786 goto fail;
787
788 VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
789 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
790 .setLayoutCount = 1,
791 .pSetLayouts = &device->meta_state.query.ds_layout,
792 .pushConstantRangeCount = 1,
793 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
794 };
795
796 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
797 &occlusion_pl_create_info,
798 &device->meta_state.alloc,
799 &device->meta_state.query.p_layout);
800 if (result != VK_SUCCESS)
801 goto fail;
802
803 VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
804 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
805 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
806 .module = radv_shader_module_to_handle(&occlusion_cs),
807 .pName = "main",
808 .pSpecializationInfo = NULL,
809 };
810
811 VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
812 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
813 .stage = occlusion_pipeline_shader_stage,
814 .flags = 0,
815 .layout = device->meta_state.query.p_layout,
816 };
817
818 result = radv_CreateComputePipelines(radv_device_to_handle(device),
819 radv_pipeline_cache_to_handle(&device->meta_state.cache),
820 1, &occlusion_vk_pipeline_info, NULL,
821 &device->meta_state.query.occlusion_query_pipeline);
822 if (result != VK_SUCCESS)
823 goto fail;
824
825 VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
826 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
827 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
828 .module = radv_shader_module_to_handle(&pipeline_statistics_cs),
829 .pName = "main",
830 .pSpecializationInfo = NULL,
831 };
832
833 VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
834 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
835 .stage = pipeline_statistics_pipeline_shader_stage,
836 .flags = 0,
837 .layout = device->meta_state.query.p_layout,
838 };
839
840 result = radv_CreateComputePipelines(radv_device_to_handle(device),
841 radv_pipeline_cache_to_handle(&device->meta_state.cache),
842 1, &pipeline_statistics_vk_pipeline_info, NULL,
843 &device->meta_state.query.pipeline_statistics_query_pipeline);
844 if (result != VK_SUCCESS)
845 goto fail;
846
847 VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
848 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
849 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
850 .module = radv_shader_module_to_handle(&tfb_cs),
851 .pName = "main",
852 .pSpecializationInfo = NULL,
853 };
854
855 VkComputePipelineCreateInfo tfb_pipeline_info = {
856 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
857 .stage = tfb_pipeline_shader_stage,
858 .flags = 0,
859 .layout = device->meta_state.query.p_layout,
860 };
861
862 result = radv_CreateComputePipelines(radv_device_to_handle(device),
863 radv_pipeline_cache_to_handle(&device->meta_state.cache),
864 1, &tfb_pipeline_info, NULL,
865 &device->meta_state.query.tfb_query_pipeline);
866 fail:
867 if (result != VK_SUCCESS)
868 radv_device_finish_meta_query_state(device);
869 ralloc_free(occlusion_cs.nir);
870 ralloc_free(pipeline_statistics_cs.nir);
871 ralloc_free(tfb_cs.nir);
872 mtx_unlock(&device->meta_state.mtx);
873 return result;
874 }
875
876 VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
877 {
878 if (on_demand)
879 return VK_SUCCESS;
880
881 return radv_device_init_meta_query_state_internal(device);
882 }
883
884 void radv_device_finish_meta_query_state(struct radv_device *device)
885 {
886 if (device->meta_state.query.tfb_query_pipeline)
887 radv_DestroyPipeline(radv_device_to_handle(device),
888 device->meta_state.query.tfb_query_pipeline,
889 &device->meta_state.alloc);
890
891 if (device->meta_state.query.pipeline_statistics_query_pipeline)
892 radv_DestroyPipeline(radv_device_to_handle(device),
893 device->meta_state.query.pipeline_statistics_query_pipeline,
894 &device->meta_state.alloc);
895
896 if (device->meta_state.query.occlusion_query_pipeline)
897 radv_DestroyPipeline(radv_device_to_handle(device),
898 device->meta_state.query.occlusion_query_pipeline,
899 &device->meta_state.alloc);
900
901 if (device->meta_state.query.p_layout)
902 radv_DestroyPipelineLayout(radv_device_to_handle(device),
903 device->meta_state.query.p_layout,
904 &device->meta_state.alloc);
905
906 if (device->meta_state.query.ds_layout)
907 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
908 device->meta_state.query.ds_layout,
909 &device->meta_state.alloc);
910 }
911
912 static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
913 VkPipeline *pipeline,
914 struct radeon_winsys_bo *src_bo,
915 struct radeon_winsys_bo *dst_bo,
916 uint64_t src_offset, uint64_t dst_offset,
917 uint32_t src_stride, uint32_t dst_stride,
918 uint32_t count, uint32_t flags,
919 uint32_t pipeline_stats_mask, uint32_t avail_offset)
920 {
921 struct radv_device *device = cmd_buffer->device;
922 struct radv_meta_saved_state saved_state;
923 bool old_predicating;
924
925 if (!*pipeline) {
926 VkResult ret = radv_device_init_meta_query_state_internal(device);
927 if (ret != VK_SUCCESS) {
928 cmd_buffer->record_result = ret;
929 return;
930 }
931 }
932
933 radv_meta_save(&saved_state, cmd_buffer,
934 RADV_META_SAVE_COMPUTE_PIPELINE |
935 RADV_META_SAVE_CONSTANTS |
936 RADV_META_SAVE_DESCRIPTORS);
937
938 /* VK_EXT_conditional_rendering says that copy commands should not be
939 * affected by conditional rendering.
940 */
941 old_predicating = cmd_buffer->state.predicating;
942 cmd_buffer->state.predicating = false;
943
944 struct radv_buffer dst_buffer = {
945 .bo = dst_bo,
946 .offset = dst_offset,
947 .size = dst_stride * count
948 };
949
950 struct radv_buffer src_buffer = {
951 .bo = src_bo,
952 .offset = src_offset,
953 .size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
954 };
955
956 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
957 VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
958
959 radv_meta_push_descriptor_set(cmd_buffer,
960 VK_PIPELINE_BIND_POINT_COMPUTE,
961 device->meta_state.query.p_layout,
962 0, /* set */
963 2, /* descriptorWriteCount */
964 (VkWriteDescriptorSet[]) {
965 {
966 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
967 .dstBinding = 0,
968 .dstArrayElement = 0,
969 .descriptorCount = 1,
970 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
971 .pBufferInfo = &(VkDescriptorBufferInfo) {
972 .buffer = radv_buffer_to_handle(&dst_buffer),
973 .offset = 0,
974 .range = VK_WHOLE_SIZE
975 }
976 },
977 {
978 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
979 .dstBinding = 1,
980 .dstArrayElement = 0,
981 .descriptorCount = 1,
982 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
983 .pBufferInfo = &(VkDescriptorBufferInfo) {
984 .buffer = radv_buffer_to_handle(&src_buffer),
985 .offset = 0,
986 .range = VK_WHOLE_SIZE
987 }
988 }
989 });
990
991 /* Encode the number of elements for easy access by the shader. */
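/* The low 11 bits select which statistics were requested; storing the
 * popcount in bits 16+ lets the shader recover the element count with a
 * single stats_mask >> 16. */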
992 pipeline_stats_mask &= 0x7ff;
993 pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
994
995 avail_offset -= src_offset;
996
997 struct {
998 uint32_t flags;
999 uint32_t dst_stride;
1000 uint32_t pipeline_stats_mask;
1001 uint32_t avail_offset;
1002 } push_constants = {
1003 flags,
1004 dst_stride,
1005 pipeline_stats_mask,
1006 avail_offset
1007 };
1008
1009 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
1010 device->meta_state.query.p_layout,
1011 VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
1012 &push_constants);
1013
1014 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
1015 RADV_CMD_FLAG_INV_VMEM_L1;
1016
1017 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1018 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
1019
1020 radv_unaligned_dispatch(cmd_buffer, count, 1, 1);
1021
1022 /* Restore conditional rendering. */
1023 cmd_buffer->state.predicating = old_predicating;
1024
1025 radv_meta_restore(&saved_state, cmd_buffer);
1026 }
1027
1028 VkResult radv_CreateQueryPool(
1029 VkDevice _device,
1030 const VkQueryPoolCreateInfo* pCreateInfo,
1031 const VkAllocationCallbacks* pAllocator,
1032 VkQueryPool* pQueryPool)
1033 {
1034 RADV_FROM_HANDLE(radv_device, device, _device);
1035 struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
1036 sizeof(*pool), 8,
1037 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1038 uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
1039 ? TIMESTAMP_NOT_READY : 0;
1040
1041 if (!pool)
1042 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1043
1044
1045 switch(pCreateInfo->queryType) {
1046 case VK_QUERY_TYPE_OCCLUSION:
1047 pool->stride = 16 * get_max_db(device);
1048 break;
1049 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1050 pool->stride = pipelinestat_block_size * 2;
1051 break;
1052 case VK_QUERY_TYPE_TIMESTAMP:
1053 pool->stride = 8;
1054 break;
1055 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1056 pool->stride = 32;
1057 break;
1058 default:
1059 unreachable("creating unhandled query type");
1060 }
1061
1062 pool->type = pCreateInfo->queryType;
1063 pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
1064 pool->availability_offset = pool->stride * pCreateInfo->queryCount;
1065 pool->size = pool->availability_offset;
1066 if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
1067 pool->size += 4 * pCreateInfo->queryCount;
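/* Pipeline statistics queries keep one 4-byte availability word per query
 * after the result area; the other query types encode availability in the
 * result values themselves. */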
1068
1069 pool->bo = device->ws->buffer_create(device->ws, pool->size,
1070 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
1071 RADV_BO_PRIORITY_QUERY_POOL);
1072
1073 if (!pool->bo) {
1074 vk_free2(&device->alloc, pAllocator, pool);
1075 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1076 }
1077
1078 pool->ptr = device->ws->buffer_map(pool->bo);
1079
1080 if (!pool->ptr) {
1081 device->ws->buffer_destroy(pool->bo);
1082 vk_free2(&device->alloc, pAllocator, pool);
1083 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1084 }
1085 memset(pool->ptr, initial_value, pool->size);
1086
1087 *pQueryPool = radv_query_pool_to_handle(pool);
1088 return VK_SUCCESS;
1089 }
1090
1091 void radv_DestroyQueryPool(
1092 VkDevice _device,
1093 VkQueryPool _pool,
1094 const VkAllocationCallbacks* pAllocator)
1095 {
1096 RADV_FROM_HANDLE(radv_device, device, _device);
1097 RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
1098
1099 if (!pool)
1100 return;
1101
1102 device->ws->buffer_destroy(pool->bo);
1103 vk_free2(&device->alloc, pAllocator, pool);
1104 }
1105
1106 VkResult radv_GetQueryPoolResults(
1107 VkDevice _device,
1108 VkQueryPool queryPool,
1109 uint32_t firstQuery,
1110 uint32_t queryCount,
1111 size_t dataSize,
1112 void* pData,
1113 VkDeviceSize stride,
1114 VkQueryResultFlags flags)
1115 {
1116 RADV_FROM_HANDLE(radv_device, device, _device);
1117 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1118 char *data = pData;
1119 VkResult result = VK_SUCCESS;
1120
1121 for(unsigned i = 0; i < queryCount; ++i, data += stride) {
1122 char *dest = data;
1123 unsigned query = firstQuery + i;
1124 char *src = pool->ptr + query * pool->stride;
1125 uint32_t available;
1126
1127 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1128 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1129 while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
1130 ;
1131 available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
1132 }
1133
1134 switch (pool->type) {
1135 case VK_QUERY_TYPE_TIMESTAMP: {
1136 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
1137
1138 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1139 while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
1140 ;
1141 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
1142 }
1143
1144 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1145 result = VK_NOT_READY;
1146
1147 if (flags & VK_QUERY_RESULT_64_BIT) {
1148 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1149 *(uint64_t*)dest = *(uint64_t*)src;
1150 dest += 8;
1151 } else {
1152 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1153 *(uint32_t*)dest = *(uint32_t*)src;
1154 dest += 4;
1155 }
1156 break;
1157 }
1158 case VK_QUERY_TYPE_OCCLUSION: {
1159 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
1160 uint64_t sample_count = 0;
1161 int db_count = get_max_db(device);
1162 available = 1;
1163
1164 for (int i = 0; i < db_count; ++i) {
1165 uint64_t start, end;
1166 do {
1167 start = src64[2 * i];
1168 end = src64[2 * i + 1];
1169 } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));
1170
1171 if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
1172 available = 0;
1173 else {
1174 sample_count += end - start;
1175 }
1176 }
1177
1178 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1179 result = VK_NOT_READY;
1180
1181 if (flags & VK_QUERY_RESULT_64_BIT) {
1182 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1183 *(uint64_t*)dest = sample_count;
1184 dest += 8;
1185 } else {
1186 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1187 *(uint32_t*)dest = sample_count;
1188 dest += 4;
1189 }
1190 break;
1191 }
1192 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1193 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1194 result = VK_NOT_READY;
1195
1196 const uint64_t *start = (uint64_t*)src;
1197 const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
1198 if (flags & VK_QUERY_RESULT_64_BIT) {
1199 uint64_t *dst = (uint64_t*)dest;
1200 dest += util_bitcount(pool->pipeline_stats_mask) * 8;
1201 for(int i = 0; i < 11; ++i) {
1202 if(pool->pipeline_stats_mask & (1u << i)) {
1203 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1204 *dst = stop[pipeline_statistics_indices[i]] -
1205 start[pipeline_statistics_indices[i]];
1206 dst++;
1207 }
1208 }
1209
1210 } else {
1211 uint32_t *dst = (uint32_t*)dest;
1212 dest += util_bitcount(pool->pipeline_stats_mask) * 4;
1213 for(int i = 0; i < 11; ++i) {
1214 if(pool->pipeline_stats_mask & (1u << i)) {
1215 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1216 *dst = stop[pipeline_statistics_indices[i]] -
1217 start[pipeline_statistics_indices[i]];
1218 dst++;
1219 }
1220 }
1221 }
1222 break;
1223 }
1224 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
1225 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
1226 uint64_t num_primitives_written;
1227 uint64_t primitive_storage_needed;
1228
1229 /* SAMPLE_STREAMOUTSTATS stores this structure:
1230 * {
1231 * u64 NumPrimitivesWritten;
1232 * u64 PrimitiveStorageNeeded;
1233 * }
1234 */
1235 available = 1;
1236 for (int j = 0; j < 4; j++) {
1237 if (!(src64[j] & 0x8000000000000000UL))
1238 available = 0;
1239 }
1240
1241 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1242 result = VK_NOT_READY;
1243
1244 num_primitives_written = src64[3] - src64[1];
1245 primitive_storage_needed = src64[2] - src64[0];
1246
1247 if (flags & VK_QUERY_RESULT_64_BIT) {
1248 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1249 *(uint64_t *)dest = num_primitives_written;
1250 dest += 8;
1251 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1252 *(uint64_t *)dest = primitive_storage_needed;
1253 dest += 8;
1254 } else {
1255 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1256 *(uint32_t *)dest = num_primitives_written;
1257 dest += 4;
1258 if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1259 *(uint32_t *)dest = primitive_storage_needed;
1260 dest += 4;
1261 }
1262 break;
1263 }
1264 default:
1265 unreachable("trying to get results of unhandled query type");
1266 }
1267
1268 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1269 if (flags & VK_QUERY_RESULT_64_BIT) {
1270 *(uint64_t*)dest = available;
1271 } else {
1272 *(uint32_t*)dest = available;
1273 }
1274 }
1275 }
1276
1277 return result;
1278 }
1279
1280 void radv_CmdCopyQueryPoolResults(
1281 VkCommandBuffer commandBuffer,
1282 VkQueryPool queryPool,
1283 uint32_t firstQuery,
1284 uint32_t queryCount,
1285 VkBuffer dstBuffer,
1286 VkDeviceSize dstOffset,
1287 VkDeviceSize stride,
1288 VkQueryResultFlags flags)
1289 {
1290 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1291 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1292 RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
1293 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1294 unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
1295 uint64_t va = radv_buffer_get_va(pool->bo);
1296 uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
1297 dest_va += dst_buffer->offset + dstOffset;
1298
1299 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
1300 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
1301
1302 switch (pool->type) {
1303 case VK_QUERY_TYPE_OCCLUSION:
1304 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1305 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1306 unsigned query = firstQuery + i;
1307 uint64_t src_va = va + query * pool->stride + pool->stride - 4;
1308
1309 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1310
1311 /* Waits on the upper word of the last DB entry */
1312 radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
1313 src_va, 0x80000000, 0xffffffff);
1314 }
1315 }
1316 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
1317 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1318 dst_buffer->offset + dstOffset,
1319 pool->stride, stride,
1320 queryCount, flags, 0, 0);
1321 break;
1322 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1323 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1324 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1325 unsigned query = firstQuery + i;
1326
1327 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1328
1329 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1330
1331 /* This waits on the ME. All copies below are done on the ME */
1332 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
1333 avail_va, 1, 0xffffffff);
1334 }
1335 }
1336 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
1337 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1338 dst_buffer->offset + dstOffset,
1339 pool->stride, stride, queryCount, flags,
1340 pool->pipeline_stats_mask,
1341 pool->availability_offset + 4 * firstQuery);
1342 break;
1343 case VK_QUERY_TYPE_TIMESTAMP:
1344 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1345 unsigned query = firstQuery + i;
1346 uint64_t local_src_va = va + query * pool->stride;
1347
1348 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
1349
1350
1351 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1352 /* Wait on the high 32 bits of the timestamp in
1353 * case the low part is 0xffffffff.
1354 */
1355 radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
1356 local_src_va + 4,
1357 TIMESTAMP_NOT_READY >> 32,
1358 0xffffffff);
1359 }
1360 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1361 uint64_t avail_dest_va = dest_va + elem_size;
1362
1363 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1364 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1365 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
1366 radeon_emit(cs, local_src_va);
1367 radeon_emit(cs, local_src_va >> 32);
1368 radeon_emit(cs, avail_dest_va);
1369 radeon_emit(cs, avail_dest_va >> 32);
1370 }
1371
1372 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1373 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1374 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
1375 ((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
1376 radeon_emit(cs, local_src_va);
1377 radeon_emit(cs, local_src_va >> 32);
1378 radeon_emit(cs, dest_va);
1379 radeon_emit(cs, dest_va >> 32);
1380
1381
1382 assert(cs->cdw <= cdw_max);
1383 }
1384 break;
1385 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1386 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1387 for(unsigned i = 0; i < queryCount; i++) {
1388 unsigned query = firstQuery + i;
1389 uint64_t src_va = va + query * pool->stride;
1390
1391 radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);
1392
1393 /* Wait on the upper word of all results. */
1394 for (unsigned j = 0; j < 4; j++, src_va += 8) {
1395 radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
1396 src_va + 4, 0x80000000,
1397 0xffffffff);
1398 }
1399 }
1400 }
1401
1402 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
1403 pool->bo, dst_buffer->bo,
1404 firstQuery * pool->stride,
1405 dst_buffer->offset + dstOffset,
1406 pool->stride, stride,
1407 queryCount, flags, 0, 0);
1408 break;
1409 default:
1410 unreachable("trying to get results of unhandled query type");
1411 }
1412
1413 }
1414
1415 void radv_CmdResetQueryPool(
1416 VkCommandBuffer commandBuffer,
1417 VkQueryPool queryPool,
1418 uint32_t firstQuery,
1419 uint32_t queryCount)
1420 {
1421 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1422 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1423 uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
1424 ? TIMESTAMP_NOT_READY : 0;
1425 uint32_t flush_bits = 0;
1426
1427 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1428 firstQuery * pool->stride,
1429 queryCount * pool->stride, value);
1430
1431 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1432 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1433 pool->availability_offset + firstQuery * 4,
1434 queryCount * 4, 0);
1435 }
1436
1437 if (flush_bits) {
1438 /* Only need to flush caches for the compute shader path. */
1439 cmd_buffer->pending_reset_query = true;
1440 cmd_buffer->state.flush_bits |= flush_bits;
1441 }
1442 }
1443
1444 void radv_ResetQueryPoolEXT(
1445 VkDevice _device,
1446 VkQueryPool queryPool,
1447 uint32_t firstQuery,
1448 uint32_t queryCount)
1449 {
1450 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1451
1452 uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
1453 ? TIMESTAMP_NOT_READY : 0;
1454 uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
1455 uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);
1456
1457 for(uint32_t *p = data; p != data_end; ++p)
1458 *p = value;
1459
1460 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1461 memset(pool->ptr + pool->availability_offset + firstQuery * 4,
1462 0, queryCount * 4);
1463 }
1464 }
1465
1466 static unsigned event_type_for_stream(unsigned stream)
1467 {
1468 switch (stream) {
1469 default:
1470 case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
1471 case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
1472 case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
1473 case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
1474 }
1475 }
1476
1477 static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
1478 struct radv_query_pool *pool)
1479 {
1480 if (cmd_buffer->pending_reset_query) {
1481 if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
1482 /* Only need to flush caches if the query pool size is
1483 * large enough to be reset using the compute shader
1484 * path. Small pools don't need any cache flushes
1485 * because we use a CP DMA clear.
1486 */
1487 si_emit_cache_flush(cmd_buffer);
1488 }
1489 }
1490 }
1491
1492 static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
1493 uint64_t va,
1494 VkQueryType query_type,
1495 VkQueryControlFlags flags,
1496 uint32_t index)
1497 {
1498 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1499 switch (query_type) {
1500 case VK_QUERY_TYPE_OCCLUSION:
1501 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1502
1503 ++cmd_buffer->state.active_occlusion_queries;
1504 if (cmd_buffer->state.active_occlusion_queries == 1) {
1505 if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
1506 /* This is the first occlusion query, enable
1507 * the hint if the precision bit is set.
1508 */
1509 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1510 }
1511
1512 radv_set_db_count_control(cmd_buffer);
1513 } else {
1514 if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
1515 !cmd_buffer->state.perfect_occlusion_queries_enabled) {
1516 /* This is not the first query, but this one
1517 * needs to enable precision, DB_COUNT_CONTROL
1518 * has to be updated accordingly.
1519 */
1520 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1521
1522 radv_set_db_count_control(cmd_buffer);
1523 }
1524 }
1525
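		/* ZPASS_DONE makes every enabled render backend write its
		 * Z-pass (occlusion) counter into the "begin" slot of its
		 * 16-byte begin/end pair starting at va.
		 */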
1526 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1527 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1528 radeon_emit(cs, va);
1529 radeon_emit(cs, va >> 32);
1530 break;
1531 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1532 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1533
1534 ++cmd_buffer->state.active_pipeline_queries;
1535 if (cmd_buffer->state.active_pipeline_queries == 1) {
1536 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1537 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
1538 }
1539
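		/* SAMPLE_PIPELINESTAT dumps all eleven 64-bit pipeline
		 * statistic counters into the "begin" block at va.
		 */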
1540 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1541 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1542 radeon_emit(cs, va);
1543 radeon_emit(cs, va >> 32);
1544 break;
1545 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1546 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1547
1548 assert(index < MAX_SO_STREAMS);
1549
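		/* SAMPLE_STREAMOUTSTATSn writes the primitives-written and
		 * primitives-needed counters for this vertex stream to va.
		 */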
1550 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1551 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1552 radeon_emit(cs, va);
1553 radeon_emit(cs, va >> 32);
1554 break;
1555 default:
1556 unreachable("beginning unhandled query type");
1557 }
1558
1559 }
1560
1561 static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
1562 uint64_t va, uint64_t avail_va,
1563 VkQueryType query_type, uint32_t index)
1564 {
1565 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1566 switch (query_type) {
1567 case VK_QUERY_TYPE_OCCLUSION:
1568 radeon_check_space(cmd_buffer->device->ws, cs, 14);
1569
1570 cmd_buffer->state.active_occlusion_queries--;
1571 if (cmd_buffer->state.active_occlusion_queries == 0) {
1572 radv_set_db_count_control(cmd_buffer);
1573
1574 /* Reset the perfect occlusion queries hint now that no
1575 * queries are active.
1576 */
1577 cmd_buffer->state.perfect_occlusion_queries_enabled = false;
1578 }
1579
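		/* The end-of-query counters land 8 bytes after the begin
		 * values, completing each render backend's begin/end pair.
		 */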
1580 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1581 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1582 radeon_emit(cs, va + 8);
1583 radeon_emit(cs, (va + 8) >> 32);
1584
1585 break;
1586 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1587 radeon_check_space(cmd_buffer->device->ws, cs, 16);
1588
1589 cmd_buffer->state.active_pipeline_queries--;
1590 if (cmd_buffer->state.active_pipeline_queries == 0) {
1591 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
1592 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1593 }
1594 va += pipelinestat_block_size;
1595
1596 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1597 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1598 radeon_emit(cs, va);
1599 radeon_emit(cs, va >> 32);
1600
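		/* Once all prior work reaches bottom-of-pipe, write 1 to the
		 * availability slot to mark the results as ready.
		 */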
1601 si_cs_emit_write_event_eop(cs,
1602 cmd_buffer->device->physical_device->rad_info.chip_class,
1603 radv_cmd_buffer_uses_mec(cmd_buffer),
1604 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1605 EOP_DATA_SEL_VALUE_32BIT,
1606 avail_va, 1,
1607 cmd_buffer->gfx9_eop_bug_va);
1608 break;
1609 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1610 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1611
1612 assert(index < MAX_SO_STREAMS);
1613
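		/* The end-of-query streamout counters are written 16 bytes
		 * after the begin values.
		 */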
1614 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1615 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1616 radeon_emit(cs, (va + 16));
1617 radeon_emit(cs, (va + 16) >> 32);
1618 break;
1619 default:
1620 unreachable("ending unhandled query type");
1621 }
1622 }
1623
1624 void radv_CmdBeginQueryIndexedEXT(
1625 VkCommandBuffer commandBuffer,
1626 VkQueryPool queryPool,
1627 uint32_t query,
1628 VkQueryControlFlags flags,
1629 uint32_t index)
1630 {
1631 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1632 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1633 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1634 uint64_t va = radv_buffer_get_va(pool->bo);
1635
1636 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1637
1638 emit_query_flush(cmd_buffer, pool);
1639
1640 va += pool->stride * query;
1641
1642 emit_begin_query(cmd_buffer, va, pool->type, flags, index);
1643 }
1644
1645 void radv_CmdBeginQuery(
1646 VkCommandBuffer commandBuffer,
1647 VkQueryPool queryPool,
1648 uint32_t query,
1649 VkQueryControlFlags flags)
1650 {
1651 radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
1652 }
1653
1654 void radv_CmdEndQueryIndexedEXT(
1655 VkCommandBuffer commandBuffer,
1656 VkQueryPool queryPool,
1657 uint32_t query,
1658 uint32_t index)
1659 {
1660 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1661 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1662 uint64_t va = radv_buffer_get_va(pool->bo);
1663 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1664 va += pool->stride * query;
1665
1666 	/* We do not need to add the pool BO to the list because the query
1667 	 * must currently be active, which means the BO is already in the list.
1668 	 */
1669 emit_end_query(cmd_buffer, va, avail_va, pool->type, index);
1670
1671 	/*
1672 	 * For multiview we have to emit a query for each bit in the mask.
1673 	 * However, the first query we emit will get the totals for all
1674 	 * the operations, so we don't want real values in the other
1675 	 * queries. This emits a fake begin/end sequence so the waiting
1676 	 * code gets a completed query value and doesn't hang, but the
1677 	 * query returns 0.
1678 	 */
1679 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
1680 		/* The outer va and avail_va already point at this query's
1681 		 * result and availability slots; just step them per view. */
1682 
1683 for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
1684 va += pool->stride;
1685 avail_va += 4;
1686 emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
1687 emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
1688 }
1689 }
1690 }
1691
1692 void radv_CmdEndQuery(
1693 VkCommandBuffer commandBuffer,
1694 VkQueryPool queryPool,
1695 uint32_t query)
1696 {
1697 radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
1698 }
1699
1700 void radv_CmdWriteTimestamp(
1701 VkCommandBuffer commandBuffer,
1702 VkPipelineStageFlagBits pipelineStage,
1703 VkQueryPool queryPool,
1704 uint32_t query)
1705 {
1706 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1707 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1708 bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
1709 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1710 uint64_t va = radv_buffer_get_va(pool->bo);
1711 uint64_t query_va = va + pool->stride * query;
1712
1713 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1714
1715 emit_query_flush(cmd_buffer, pool);
1716
1717 	unsigned num_queries = 1;
1718 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
1719 num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
1720
1721 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
1722
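	/* Top-of-pipe timestamps copy the current GPU clock directly with a
	 * CP COPY_DATA packet; every other stage uses a bottom-of-pipe EOP
	 * event so the timestamp is written only after preceding work has
	 * completed.
	 */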
1723 for (unsigned i = 0; i < num_queries; i++) {
1724 switch(pipelineStage) {
1725 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1726 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1727 radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
1728 COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
1729 COPY_DATA_DST_SEL(V_370_MEM));
1730 radeon_emit(cs, 0);
1731 radeon_emit(cs, 0);
1732 radeon_emit(cs, query_va);
1733 radeon_emit(cs, query_va >> 32);
1734 break;
1735 default:
1736 si_cs_emit_write_event_eop(cs,
1737 cmd_buffer->device->physical_device->rad_info.chip_class,
1738 mec,
1739 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1740 EOP_DATA_SEL_TIMESTAMP,
1741 query_va, 0,
1742 cmd_buffer->gfx9_eop_bug_va);
1743 break;
1744 }
1745 query_va += pool->stride;
1746 }
1747 assert(cmd_buffer->cs->cdw <= cdw_max);
1748 }