radv: remove useless sync after copying query results with compute
[mesa.git] / src/amd/vulkan/radv_query.c
1 /*
2 * Copyright 2016 Red Hat Inc.
3 * Based on anv:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31
32 #include "nir/nir_builder.h"
33 #include "radv_meta.h"
34 #include "radv_private.h"
35 #include "radv_cs.h"
36 #include "sid.h"
37
38 #define TIMESTAMP_NOT_READY UINT64_MAX
39
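/* Each pipeline-statistics sample is a block of 11 64-bit counters, written
 * by the GPU in hardware order (roughly: PS invocations, clipper primitives,
 * clipper invocations, VS invocations, GS invocations, GS primitives, IA
 * primitives, IA vertices, HS invocations, DS invocations, CS invocations),
 * while Vulkan exposes them in VkQueryPipelineStatisticFlagBits order. The
 * table below maps each Vulkan bit to its hardware slot, e.g. bit 0
 * (INPUT_ASSEMBLY_VERTICES) lives in slot 7.
 */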
40 static const int pipelinestat_block_size = 11 * 8;
41 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
42
43 static unsigned get_max_db(struct radv_device *device)
44 {
45 unsigned num_db = device->physical_device->rad_info.num_render_backends;
46 MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
47
48 	/* Otherwise we would need to change the query reset procedure to cope with disabled RBs */
49 assert(rb_mask == ((1ull << num_db) - 1));
50
51 return num_db;
52 }
53
54 static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
55 {
56 nir_ssa_def *counter = nir_load_var(b, var);
57
58 nir_if *if_stmt = nir_if_create(b->shader);
59 if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
60 nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
61
62 b->cursor = nir_after_cf_list(&if_stmt->then_list);
63
64 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
65 nir_builder_instr_insert(b, &instr->instr);
66
67 b->cursor = nir_after_cf_node(&if_stmt->cf_node);
68 counter = nir_iadd(b, counter, nir_imm_int(b, 1));
69 nir_store_var(b, var, counter, 0x1);
70 }
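/* For illustration, the NIR emitted by radv_break_on_count() above behaves
 * roughly like this C fragment inside the enclosing loop:
 *
 *    if (counter >= count)
 *        break;
 *    counter++;
 *
 * The builder cursor is left after the if, so callers continue emitting the
 * body of the current iteration from there.
 */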
71
72 static struct nir_ssa_def *
73 radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
74 {
75 nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
76 nir_intrinsic_set_base(flags, 0);
77 nir_intrinsic_set_range(flags, 16);
78 flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
79 flags->num_components = 1;
80 nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
81 nir_builder_instr_insert(b, &flags->instr);
82 return &flags->dest.ssa;
83 }
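/* radv_load_push_int() above reads one dword from the 16-byte push-constant
 * block shared by both query shaders; the byte offsets used later are
 * 0 (flags), 4 (dst_stride), 8 (stats_mask) and 12 (avail_offset), matching
 * the push_constants struct in radv_query_shader().
 */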
84
85 static nir_shader *
86 build_occlusion_query_shader(struct radv_device *device) {
87 /* the shader this builds is roughly
88 *
89 * push constants {
90 * uint32_t flags;
91 * uint32_t dst_stride;
92 * };
93 *
94 * uint32_t src_stride = 16 * db_count;
95 *
96 * location(binding = 0) buffer dst_buf;
97 * location(binding = 1) buffer src_buf;
98 *
99 * void main() {
100 * uint64_t result = 0;
101 * uint64_t src_offset = src_stride * global_id.x;
102 * uint64_t dst_offset = dst_stride * global_id.x;
103 * bool available = true;
104 * for (int i = 0; i < db_count; ++i) {
105 * uint64_t start = src_buf[src_offset + 16 * i];
106 * uint64_t end = src_buf[src_offset + 16 * i + 8];
107 * if ((start & (1ull << 63)) && (end & (1ull << 63)))
108 * result += end - start;
109 * else
110 * available = false;
111 * }
112 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
113 * if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
114 * if (flags & VK_QUERY_RESULT_64_BIT)
115 * dst_buf[dst_offset] = result;
116 * else
117 * 			dst_buf[dst_offset] = (uint32_t)result;
118 * }
119 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
120 * dst_buf[dst_offset + elem_size] = available;
121 * }
122 * }
123 */
124 nir_builder b;
125 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
126 b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
127 b.shader->info.cs.local_size[0] = 64;
128 b.shader->info.cs.local_size[1] = 1;
129 b.shader->info.cs.local_size[2] = 1;
130
131 nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
132 nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
133 nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
134 nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
135 nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
136 unsigned db_count = get_max_db(device);
137
138 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
139
140 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
141 nir_intrinsic_vulkan_resource_index);
142 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
143 nir_intrinsic_set_desc_set(dst_buf, 0);
144 nir_intrinsic_set_binding(dst_buf, 0);
145 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
146 nir_builder_instr_insert(&b, &dst_buf->instr);
147
148 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
149 nir_intrinsic_vulkan_resource_index);
150 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
151 nir_intrinsic_set_desc_set(src_buf, 0);
152 nir_intrinsic_set_binding(src_buf, 1);
153 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
154 nir_builder_instr_insert(&b, &src_buf->instr);
155
156 nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
157 nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
158 nir_ssa_def *block_size = nir_imm_ivec4(&b,
159 b.shader->info.cs.local_size[0],
160 b.shader->info.cs.local_size[1],
161 b.shader->info.cs.local_size[2], 0);
162 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
163 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
164
165 nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
166 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
167 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
168 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
169
170
171 nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
172 nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
173 nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);
174
175 nir_loop *outer_loop = nir_loop_create(b.shader);
176 nir_builder_cf_insert(&b, &outer_loop->cf_node);
177 b.cursor = nir_after_cf_list(&outer_loop->body);
178
179 nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
180 radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
181
182 nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
183 load_offset = nir_iadd(&b, input_base, load_offset);
184
185 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
186 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
187 load->src[1] = nir_src_for_ssa(load_offset);
188 nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
189 load->num_components = 2;
190 nir_builder_instr_insert(&b, &load->instr);
191
192 nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
193 nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);
194
195 nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
196 nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
197
198 nir_if *update_if = nir_if_create(b.shader);
199 update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
200 nir_cf_node_insert(b.cursor, &update_if->cf_node);
201
202 b.cursor = nir_after_cf_list(&update_if->then_list);
203
204 nir_store_var(&b, result,
205 nir_iadd(&b, nir_load_var(&b, result),
206 nir_isub(&b, nir_load_var(&b, end),
207 nir_load_var(&b, start))), 0x1);
208
209 b.cursor = nir_after_cf_list(&update_if->else_list);
210
211 nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);
212
213 b.cursor = nir_after_cf_node(&outer_loop->cf_node);
214
215 /* Store the result if complete or if partial results have been requested. */
216
217 nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
218 nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
219 nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
220
221 nir_if *store_if = nir_if_create(b.shader);
222 store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
223 nir_cf_node_insert(b.cursor, &store_if->cf_node);
224
225 b.cursor = nir_after_cf_list(&store_if->then_list);
226
227 nir_if *store_64bit_if = nir_if_create(b.shader);
228 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
229 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
230
231 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
232
233 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
234 store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
235 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
236 store->src[2] = nir_src_for_ssa(output_base);
237 nir_intrinsic_set_write_mask(store, 0x1);
238 store->num_components = 1;
239 nir_builder_instr_insert(&b, &store->instr);
240
241 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
242
243 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
244 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
245 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
246 store->src[2] = nir_src_for_ssa(output_base);
247 nir_intrinsic_set_write_mask(store, 0x1);
248 store->num_components = 1;
249 nir_builder_instr_insert(&b, &store->instr);
250
251 b.cursor = nir_after_cf_node(&store_if->cf_node);
252
253 /* Store the availability bit if requested. */
254
255 nir_if *availability_if = nir_if_create(b.shader);
256 availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
257 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
258
259 b.cursor = nir_after_cf_list(&availability_if->then_list);
260
261 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
262 store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
263 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
264 store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
265 nir_intrinsic_set_write_mask(store, 0x1);
266 store->num_components = 1;
267 nir_builder_instr_insert(&b, &store->instr);
268
269 return b.shader;
270 }
271
272 static nir_shader *
273 build_pipeline_statistics_query_shader(struct radv_device *device) {
274 /* the shader this builds is roughly
275 *
276 * push constants {
277 * uint32_t flags;
278 * uint32_t dst_stride;
279 * uint32_t stats_mask;
280 * uint32_t avail_offset;
281 * };
282 *
283 * uint32_t src_stride = pipelinestat_block_size * 2;
284 *
285 * location(binding = 0) buffer dst_buf;
286 * location(binding = 1) buffer src_buf;
287 *
288 * void main() {
289 * uint64_t src_offset = src_stride * global_id.x;
290 * uint64_t dst_base = dst_stride * global_id.x;
291 * uint64_t dst_offset = dst_base;
292 * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
293 * uint32_t elem_count = stats_mask >> 16;
294 * uint32_t available = src_buf[avail_offset + 4 * global_id.x];
295 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
296 * dst_buf[dst_offset + elem_count * elem_size] = available;
297 * }
298 * if (available) {
299 * // repeat 11 times:
300 * if (stats_mask & (1 << 0)) {
301 * uint64_t start = src_buf[src_offset + 8 * indices[0]];
302 * uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
303 * uint64_t result = end - start;
304 * if (flags & VK_QUERY_RESULT_64_BIT)
305 * dst_buf[dst_offset] = result;
306 * else
307 * 			dst_buf[dst_offset] = (uint32_t)result;
308 * dst_offset += elem_size;
309 * }
310 * } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
311 * // Set everything to 0 as we don't know what is valid.
312 * for (int i = 0; i < elem_count; ++i)
313 * dst_buf[dst_base + elem_size * i] = 0;
314 * }
315 * }
316 */
317 nir_builder b;
318 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
319 b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
320 b.shader->info.cs.local_size[0] = 64;
321 b.shader->info.cs.local_size[1] = 1;
322 b.shader->info.cs.local_size[2] = 1;
323
324 nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
325
326 nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
327 nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
328 nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");
329
330 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
331 nir_intrinsic_vulkan_resource_index);
332 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
333 nir_intrinsic_set_desc_set(dst_buf, 0);
334 nir_intrinsic_set_binding(dst_buf, 0);
335 nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
336 nir_builder_instr_insert(&b, &dst_buf->instr);
337
338 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
339 nir_intrinsic_vulkan_resource_index);
340 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
341 nir_intrinsic_set_desc_set(src_buf, 0);
342 nir_intrinsic_set_binding(src_buf, 1);
343 nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
344 nir_builder_instr_insert(&b, &src_buf->instr);
345
346 nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
347 nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
348 nir_ssa_def *block_size = nir_imm_ivec4(&b,
349 b.shader->info.cs.local_size[0],
350 b.shader->info.cs.local_size[1],
351 b.shader->info.cs.local_size[2], 0);
352 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
353 global_id = nir_channel(&b, global_id, 0); // We only care about x here.
354
355 nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
356 nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
357 nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
358 nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
359
360
361 avail_offset = nir_iadd(&b, avail_offset,
362 nir_imul(&b, global_id, nir_imm_int(&b, 4)));
363
364 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
365 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
366 load->src[1] = nir_src_for_ssa(avail_offset);
367 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
368 load->num_components = 1;
369 nir_builder_instr_insert(&b, &load->instr);
370 nir_ssa_def *available = &load->dest.ssa;
371
372 nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
373 nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
374 nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
375 nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
376
377 /* Store the availability bit if requested. */
378
379 nir_if *availability_if = nir_if_create(b.shader);
380 availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
381 nir_cf_node_insert(b.cursor, &availability_if->cf_node);
382
383 b.cursor = nir_after_cf_list(&availability_if->then_list);
384
385 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
386 store->src[0] = nir_src_for_ssa(available);
387 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
388 store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
389 nir_intrinsic_set_write_mask(store, 0x1);
390 store->num_components = 1;
391 nir_builder_instr_insert(&b, &store->instr);
392
393 b.cursor = nir_after_cf_node(&availability_if->cf_node);
394
395 nir_if *available_if = nir_if_create(b.shader);
396 available_if->condition = nir_src_for_ssa(available);
397 nir_cf_node_insert(b.cursor, &available_if->cf_node);
398
399 b.cursor = nir_after_cf_list(&available_if->then_list);
400
401 nir_store_var(&b, output_offset, output_base, 0x1);
402 for (int i = 0; i < 11; ++i) {
403 nir_if *store_if = nir_if_create(b.shader);
404 store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
405 nir_cf_node_insert(b.cursor, &store_if->cf_node);
406
407 b.cursor = nir_after_cf_list(&store_if->then_list);
408
409 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
410 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
411 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
412 nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
413 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
414 load->num_components = 1;
415 nir_builder_instr_insert(&b, &load->instr);
416 nir_ssa_def *start = &load->dest.ssa;
417
418 load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
419 load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
420 load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
421 nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
422 nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
423 load->num_components = 1;
424 nir_builder_instr_insert(&b, &load->instr);
425 nir_ssa_def *end = &load->dest.ssa;
426
427 nir_ssa_def *result = nir_isub(&b, end, start);
428
429 /* Store result */
430 nir_if *store_64bit_if = nir_if_create(b.shader);
431 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
432 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
433
434 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
435
436 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
437 store->src[0] = nir_src_for_ssa(result);
438 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
439 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
440 nir_intrinsic_set_write_mask(store, 0x1);
441 store->num_components = 1;
442 nir_builder_instr_insert(&b, &store->instr);
443
444 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
445
446 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
447 store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
448 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
449 store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
450 nir_intrinsic_set_write_mask(store, 0x1);
451 store->num_components = 1;
452 nir_builder_instr_insert(&b, &store->instr);
453
454 b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
455
456 nir_store_var(&b, output_offset,
457 nir_iadd(&b, nir_load_var(&b, output_offset),
458 elem_size), 0x1);
459
460 b.cursor = nir_after_cf_node(&store_if->cf_node);
461 }
462
463 b.cursor = nir_after_cf_list(&available_if->else_list);
464
465 available_if = nir_if_create(b.shader);
466 available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
467 nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
468 nir_cf_node_insert(b.cursor, &available_if->cf_node);
469
470 b.cursor = nir_after_cf_list(&available_if->then_list);
471
472 /* Stores zeros in all outputs. */
473
474 nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
475 nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);
476
477 nir_loop *loop = nir_loop_create(b.shader);
478 nir_builder_cf_insert(&b, &loop->cf_node);
479 b.cursor = nir_after_cf_list(&loop->body);
480
481 nir_ssa_def *current_counter = nir_load_var(&b, counter);
482 radv_break_on_count(&b, counter, elem_count);
483
484 nir_ssa_def *output_elem = nir_iadd(&b, output_base,
485 nir_imul(&b, elem_size, current_counter));
486
487 nir_if *store_64bit_if = nir_if_create(b.shader);
488 store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
489 nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
490
491 b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
492
493 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
494 store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
495 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
496 store->src[2] = nir_src_for_ssa(output_elem);
497 nir_intrinsic_set_write_mask(store, 0x1);
498 store->num_components = 1;
499 nir_builder_instr_insert(&b, &store->instr);
500
501 b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
502
503 store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
504 store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
505 store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
506 store->src[2] = nir_src_for_ssa(output_elem);
507 nir_intrinsic_set_write_mask(store, 0x1);
508 store->num_components = 1;
509 nir_builder_instr_insert(&b, &store->instr);
510
511 b.cursor = nir_after_cf_node(&loop->cf_node);
512 return b.shader;
513 }
514
515 static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
516 {
517 VkResult result;
518 struct radv_shader_module occlusion_cs = { .nir = NULL };
519 struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
520
521 mtx_lock(&device->meta_state.mtx);
522 if (device->meta_state.query.pipeline_statistics_query_pipeline) {
523 mtx_unlock(&device->meta_state.mtx);
524 return VK_SUCCESS;
525 }
526 occlusion_cs.nir = build_occlusion_query_shader(device);
527 pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
528
529 VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
530 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
531 .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
532 .bindingCount = 2,
533 .pBindings = (VkDescriptorSetLayoutBinding[]) {
534 {
535 .binding = 0,
536 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
537 .descriptorCount = 1,
538 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
539 .pImmutableSamplers = NULL
540 },
541 {
542 .binding = 1,
543 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
544 .descriptorCount = 1,
545 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
546 .pImmutableSamplers = NULL
547 },
548 }
549 };
550
551 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
552 &occlusion_ds_create_info,
553 &device->meta_state.alloc,
554 &device->meta_state.query.ds_layout);
555 if (result != VK_SUCCESS)
556 goto fail;
557
558 VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
559 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
560 .setLayoutCount = 1,
561 .pSetLayouts = &device->meta_state.query.ds_layout,
562 .pushConstantRangeCount = 1,
563 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
564 };
565
566 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
567 &occlusion_pl_create_info,
568 &device->meta_state.alloc,
569 &device->meta_state.query.p_layout);
570 if (result != VK_SUCCESS)
571 goto fail;
572
573 VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
574 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
575 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
576 .module = radv_shader_module_to_handle(&occlusion_cs),
577 .pName = "main",
578 .pSpecializationInfo = NULL,
579 };
580
581 VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
582 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
583 .stage = occlusion_pipeline_shader_stage,
584 .flags = 0,
585 .layout = device->meta_state.query.p_layout,
586 };
587
588 result = radv_CreateComputePipelines(radv_device_to_handle(device),
589 radv_pipeline_cache_to_handle(&device->meta_state.cache),
590 1, &occlusion_vk_pipeline_info, NULL,
591 &device->meta_state.query.occlusion_query_pipeline);
592 if (result != VK_SUCCESS)
593 goto fail;
594
595 VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
596 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
597 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
598 .module = radv_shader_module_to_handle(&pipeline_statistics_cs),
599 .pName = "main",
600 .pSpecializationInfo = NULL,
601 };
602
603 VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
604 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
605 .stage = pipeline_statistics_pipeline_shader_stage,
606 .flags = 0,
607 .layout = device->meta_state.query.p_layout,
608 };
609
610 result = radv_CreateComputePipelines(radv_device_to_handle(device),
611 radv_pipeline_cache_to_handle(&device->meta_state.cache),
612 1, &pipeline_statistics_vk_pipeline_info, NULL,
613 &device->meta_state.query.pipeline_statistics_query_pipeline);
614
615 fail:
616 if (result != VK_SUCCESS)
617 radv_device_finish_meta_query_state(device);
618 ralloc_free(occlusion_cs.nir);
619 ralloc_free(pipeline_statistics_cs.nir);
620 mtx_unlock(&device->meta_state.mtx);
621 return result;
622 }
623
624 VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
625 {
626 if (on_demand)
627 return VK_SUCCESS;
628
629 return radv_device_init_meta_query_state_internal(device);
630 }
631
632 void radv_device_finish_meta_query_state(struct radv_device *device)
633 {
634 if (device->meta_state.query.pipeline_statistics_query_pipeline)
635 radv_DestroyPipeline(radv_device_to_handle(device),
636 device->meta_state.query.pipeline_statistics_query_pipeline,
637 &device->meta_state.alloc);
638
639 if (device->meta_state.query.occlusion_query_pipeline)
640 radv_DestroyPipeline(radv_device_to_handle(device),
641 device->meta_state.query.occlusion_query_pipeline,
642 &device->meta_state.alloc);
643
644 if (device->meta_state.query.p_layout)
645 radv_DestroyPipelineLayout(radv_device_to_handle(device),
646 device->meta_state.query.p_layout,
647 &device->meta_state.alloc);
648
649 if (device->meta_state.query.ds_layout)
650 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
651 device->meta_state.query.ds_layout,
652 &device->meta_state.alloc);
653 }
654
655 static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
656 VkPipeline *pipeline,
657 struct radeon_winsys_bo *src_bo,
658 struct radeon_winsys_bo *dst_bo,
659 uint64_t src_offset, uint64_t dst_offset,
660 uint32_t src_stride, uint32_t dst_stride,
661 uint32_t count, uint32_t flags,
662 uint32_t pipeline_stats_mask, uint32_t avail_offset)
663 {
664 struct radv_device *device = cmd_buffer->device;
665 struct radv_meta_saved_state saved_state;
666
667 if (!*pipeline) {
668 VkResult ret = radv_device_init_meta_query_state_internal(device);
669 if (ret != VK_SUCCESS) {
670 cmd_buffer->record_result = ret;
671 return;
672 }
673 }
674
675 radv_meta_save(&saved_state, cmd_buffer,
676 RADV_META_SAVE_COMPUTE_PIPELINE |
677 RADV_META_SAVE_CONSTANTS |
678 RADV_META_SAVE_DESCRIPTORS);
679
680 struct radv_buffer dst_buffer = {
681 .bo = dst_bo,
682 .offset = dst_offset,
683 .size = dst_stride * count
684 };
685
686 struct radv_buffer src_buffer = {
687 .bo = src_bo,
688 .offset = src_offset,
689 .size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
690 };
691
692 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
693 VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
694
695 radv_meta_push_descriptor_set(cmd_buffer,
696 VK_PIPELINE_BIND_POINT_COMPUTE,
697 device->meta_state.query.p_layout,
698 0, /* set */
699 2, /* descriptorWriteCount */
700 (VkWriteDescriptorSet[]) {
701 {
702 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
703 .dstBinding = 0,
704 .dstArrayElement = 0,
705 .descriptorCount = 1,
706 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
707 .pBufferInfo = &(VkDescriptorBufferInfo) {
708 .buffer = radv_buffer_to_handle(&dst_buffer),
709 .offset = 0,
710 .range = VK_WHOLE_SIZE
711 }
712 },
713 {
714 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
715 .dstBinding = 1,
716 .dstArrayElement = 0,
717 .descriptorCount = 1,
718 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
719 .pBufferInfo = &(VkDescriptorBufferInfo) {
720 .buffer = radv_buffer_to_handle(&src_buffer),
721 .offset = 0,
722 .range = VK_WHOLE_SIZE
723 }
724 }
725 });
726
727 /* Encode the number of elements for easy access by the shader. */
728 pipeline_stats_mask &= 0x7ff;
729 pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
730
731 avail_offset -= src_offset;
732
733 struct {
734 uint32_t flags;
735 uint32_t dst_stride;
736 uint32_t pipeline_stats_mask;
737 uint32_t avail_offset;
738 } push_constants = {
739 flags,
740 dst_stride,
741 pipeline_stats_mask,
742 avail_offset
743 };
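	/* This layout must stay in sync with the radv_load_push_int() offsets
	 * used by the shaders (0, 4, 8, 12) and with the 16-byte
	 * VkPushConstantRange declared in
	 * radv_device_init_meta_query_state_internal().
	 */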
744
745 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
746 device->meta_state.query.p_layout,
747 VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
748 &push_constants);
749
750 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
751 RADV_CMD_FLAG_INV_VMEM_L1;
752
753 if (flags & VK_QUERY_RESULT_WAIT_BIT)
754 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
755
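	/* Note that no flush is emitted after the dispatch below: making the
	 * copied results visible to later consumers is the application's job
	 * via vkCmdPipelineBarrier(), so only the invalidations above are
	 * needed.
	 */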
756 radv_unaligned_dispatch(cmd_buffer, count, 1, 1);
757
758 radv_meta_restore(&saved_state, cmd_buffer);
759 }
760
761 VkResult radv_CreateQueryPool(
762 VkDevice _device,
763 const VkQueryPoolCreateInfo* pCreateInfo,
764 const VkAllocationCallbacks* pAllocator,
765 VkQueryPool* pQueryPool)
766 {
767 RADV_FROM_HANDLE(radv_device, device, _device);
768 struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
769 sizeof(*pool), 8,
770 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
771 uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
772 ? TIMESTAMP_NOT_READY : 0;
773
774 if (!pool)
775 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
776
777
778 switch(pCreateInfo->queryType) {
779 case VK_QUERY_TYPE_OCCLUSION:
780 pool->stride = 16 * get_max_db(device);
781 break;
782 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
783 pool->stride = pipelinestat_block_size * 2;
784 break;
785 case VK_QUERY_TYPE_TIMESTAMP:
786 pool->stride = 8;
787 break;
788 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
789 pool->stride = 32;
790 break;
791 default:
792 unreachable("creating unhandled query type");
793 }
794
795 pool->type = pCreateInfo->queryType;
796 pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
797 pool->availability_offset = pool->stride * pCreateInfo->queryCount;
798 pool->size = pool->availability_offset;
799 if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
800 pool->size += 4 * pCreateInfo->queryCount;
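	/* Resulting BO layout: queryCount slots of pool->stride bytes each,
	 * followed, for pipeline statistics only, by one 32-bit availability
	 * word per query starting at pool->availability_offset.
	 */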
801
802 pool->bo = device->ws->buffer_create(device->ws, pool->size,
803 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);
804
805 if (!pool->bo) {
806 vk_free2(&device->alloc, pAllocator, pool);
807 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
808 }
809
810 pool->ptr = device->ws->buffer_map(pool->bo);
811
812 if (!pool->ptr) {
813 device->ws->buffer_destroy(pool->bo);
814 vk_free2(&device->alloc, pAllocator, pool);
815 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
816 }
817 memset(pool->ptr, initial_value, pool->size);
818
819 *pQueryPool = radv_query_pool_to_handle(pool);
820 return VK_SUCCESS;
821 }
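/* For reference, typical application-side use of such a pool looks roughly
 * like this (hypothetical host code, not part of the driver):
 *
 *    VkQueryPoolCreateInfo info = {
 *        .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
 *        .queryType = VK_QUERY_TYPE_OCCLUSION,
 *        .queryCount = 8,
 *    };
 *    vkCreateQueryPool(device, &info, NULL, &pool);
 *    ... record and submit work bracketed by vkCmdBeginQuery/vkCmdEndQuery ...
 *    uint64_t results[8];
 *    vkGetQueryPoolResults(device, pool, 0, 8, sizeof(results), results,
 *                          sizeof(uint64_t),
 *                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
 */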
822
823 void radv_DestroyQueryPool(
824 VkDevice _device,
825 VkQueryPool _pool,
826 const VkAllocationCallbacks* pAllocator)
827 {
828 RADV_FROM_HANDLE(radv_device, device, _device);
829 RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
830
831 if (!pool)
832 return;
833
834 device->ws->buffer_destroy(pool->bo);
835 vk_free2(&device->alloc, pAllocator, pool);
836 }
837
838 VkResult radv_GetQueryPoolResults(
839 VkDevice _device,
840 VkQueryPool queryPool,
841 uint32_t firstQuery,
842 uint32_t queryCount,
843 size_t dataSize,
844 void* pData,
845 VkDeviceSize stride,
846 VkQueryResultFlags flags)
847 {
848 RADV_FROM_HANDLE(radv_device, device, _device);
849 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
850 char *data = pData;
851 VkResult result = VK_SUCCESS;
852
853 for(unsigned i = 0; i < queryCount; ++i, data += stride) {
854 char *dest = data;
855 unsigned query = firstQuery + i;
856 char *src = pool->ptr + query * pool->stride;
857 uint32_t available;
858
859 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
860 if (flags & VK_QUERY_RESULT_WAIT_BIT)
861 while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
862 ;
863 available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
864 }
865
866 switch (pool->type) {
867 case VK_QUERY_TYPE_TIMESTAMP: {
868 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
869
870 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
871 while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
872 ;
873 available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
874 }
875
876 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
877 result = VK_NOT_READY;
878 break;
879
880 }
881
882 if (flags & VK_QUERY_RESULT_64_BIT) {
883 *(uint64_t*)dest = *(uint64_t*)src;
884 dest += 8;
885 } else {
886 *(uint32_t*)dest = *(uint32_t*)src;
887 dest += 4;
888 }
889 break;
890 }
891 case VK_QUERY_TYPE_OCCLUSION: {
892 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
893 uint64_t sample_count = 0;
894 int db_count = get_max_db(device);
895 available = 1;
896
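		/* Each enabled render backend writes a (start, end) pair of
		 * ZPASS counters at a 16-byte stride; the hardware sets bit 63
		 * of each value once the write has landed, which is what the
		 * availability check below tests.
		 */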
897 for (int i = 0; i < db_count; ++i) {
898 uint64_t start, end;
899 do {
900 start = src64[2 * i];
901 end = src64[2 * i + 1];
902 } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));
903
904 if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
905 available = 0;
906 else {
907 sample_count += end - start;
908 }
909 }
910
911 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
912 result = VK_NOT_READY;
913 break;
914
915 }
916
917 if (flags & VK_QUERY_RESULT_64_BIT) {
918 *(uint64_t*)dest = sample_count;
919 dest += 8;
920 } else {
921 *(uint32_t*)dest = sample_count;
922 dest += 4;
923 }
924 break;
925 }
926 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
927 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
928 result = VK_NOT_READY;
929 break;
930
931 }
932
933 const uint64_t *start = (uint64_t*)src;
934 const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
935 if (flags & VK_QUERY_RESULT_64_BIT) {
936 uint64_t *dst = (uint64_t*)dest;
937 dest += util_bitcount(pool->pipeline_stats_mask) * 8;
938 for(int i = 0; i < 11; ++i)
939 if(pool->pipeline_stats_mask & (1u << i))
940 *dst++ = stop[pipeline_statistics_indices[i]] -
941 start[pipeline_statistics_indices[i]];
942
943 } else {
944 uint32_t *dst = (uint32_t*)dest;
945 dest += util_bitcount(pool->pipeline_stats_mask) * 4;
946 for(int i = 0; i < 11; ++i)
947 if(pool->pipeline_stats_mask & (1u << i))
948 *dst++ = stop[pipeline_statistics_indices[i]] -
949 start[pipeline_statistics_indices[i]];
950 }
951 break;
952 }
953 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
954 volatile uint64_t const *src64 = (volatile uint64_t const *)src;
955 uint64_t num_primitives_written;
956 uint64_t primitive_storage_needed;
957
958 /* SAMPLE_STREAMOUTSTATS stores this structure:
959 * {
960 * u64 NumPrimitivesWritten;
961 * u64 PrimitiveStorageNeeded;
962 * }
963 */
964 available = 1;
965 for (int j = 0; j < 4; j++) {
966 if (!(src64[j] & 0x8000000000000000UL))
967 available = 0;
968 }
969
970 if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
971 result = VK_NOT_READY;
972 break;
973 }
974
975 num_primitives_written = src64[3] - src64[1];
976 primitive_storage_needed = src64[2] - src64[0];
977
978 if (flags & VK_QUERY_RESULT_64_BIT) {
979 *(uint64_t *)dest = num_primitives_written;
980 dest += 8;
981 *(uint64_t *)dest = primitive_storage_needed;
982 dest += 8;
983 } else {
984 *(uint32_t *)dest = num_primitives_written;
985 dest += 4;
986 *(uint32_t *)dest = primitive_storage_needed;
987 dest += 4;
988 }
989 break;
990 }
991 default:
992 unreachable("trying to get results of unhandled query type");
993 }
994
995 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
996 if (flags & VK_QUERY_RESULT_64_BIT) {
997 *(uint64_t*)dest = available;
998 } else {
999 *(uint32_t*)dest = available;
1000 }
1001 }
1002 }
1003
1004 return result;
1005 }
1006
1007 void radv_CmdCopyQueryPoolResults(
1008 VkCommandBuffer commandBuffer,
1009 VkQueryPool queryPool,
1010 uint32_t firstQuery,
1011 uint32_t queryCount,
1012 VkBuffer dstBuffer,
1013 VkDeviceSize dstOffset,
1014 VkDeviceSize stride,
1015 VkQueryResultFlags flags)
1016 {
1017 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1018 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1019 RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
1020 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1021 unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
1022 uint64_t va = radv_buffer_get_va(pool->bo);
1023 uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
1024 dest_va += dst_buffer->offset + dstOffset;
1025
1026 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
1027 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
1028
1029 switch (pool->type) {
1030 case VK_QUERY_TYPE_OCCLUSION:
1031 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1032 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1033 unsigned query = firstQuery + i;
1034 uint64_t src_va = va + query * pool->stride + pool->stride - 4;
1035
1036 /* Waits on the upper word of the last DB entry */
1037 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
1038 radeon_emit(cs, WAIT_REG_MEM_GREATER_OR_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
1039 radeon_emit(cs, src_va);
1040 radeon_emit(cs, src_va >> 32);
1041 radeon_emit(cs, 0x80000000); /* reference value */
1042 radeon_emit(cs, 0xffffffff); /* mask */
1043 radeon_emit(cs, 4); /* poll interval */
1044 }
1045 }
1046 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
1047 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1048 dst_buffer->offset + dstOffset,
1049 pool->stride, stride,
1050 queryCount, flags, 0, 0);
1051 break;
1052 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1053 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1054 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1055 unsigned query = firstQuery + i;
1056
1057 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1058
1059 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1060
1061 /* This waits on the ME. All copies below are done on the ME */
1062 si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
1063 }
1064 }
1065 radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
1066 pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1067 dst_buffer->offset + dstOffset,
1068 pool->stride, stride, queryCount, flags,
1069 pool->pipeline_stats_mask,
1070 pool->availability_offset + 4 * firstQuery);
1071 break;
1072 case VK_QUERY_TYPE_TIMESTAMP:
1073 for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1074 unsigned query = firstQuery + i;
1075 uint64_t local_src_va = va + query * pool->stride;
1076
1077 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
1078
1079
1080 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1081 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
1082 radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
1083 radeon_emit(cs, local_src_va);
1084 radeon_emit(cs, local_src_va >> 32);
1085 radeon_emit(cs, TIMESTAMP_NOT_READY >> 32);
1086 radeon_emit(cs, 0xffffffff);
1087 radeon_emit(cs, 4);
1088 }
1089 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1090 uint64_t avail_dest_va = dest_va + elem_size;
1091
1092 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1093 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1094 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
1095 radeon_emit(cs, local_src_va);
1096 radeon_emit(cs, local_src_va >> 32);
1097 radeon_emit(cs, avail_dest_va);
1098 radeon_emit(cs, avail_dest_va >> 32);
1099 }
1100
1101 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1102 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
1103 COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
1104 ((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
1105 radeon_emit(cs, local_src_va);
1106 radeon_emit(cs, local_src_va >> 32);
1107 radeon_emit(cs, dest_va);
1108 radeon_emit(cs, dest_va >> 32);
1109
1110
1111 assert(cs->cdw <= cdw_max);
1112 }
1113 break;
1114 default:
1115 unreachable("trying to get results of unhandled query type");
1116 }
1117
1118 }
1119
1120 void radv_CmdResetQueryPool(
1121 VkCommandBuffer commandBuffer,
1122 VkQueryPool queryPool,
1123 uint32_t firstQuery,
1124 uint32_t queryCount)
1125 {
1126 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1127 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1128 uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
1129 ? TIMESTAMP_NOT_READY : 0;
1130 uint32_t flush_bits = 0;
1131
1132 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1133 firstQuery * pool->stride,
1134 queryCount * pool->stride, value);
1135
1136 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1137 flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1138 pool->availability_offset + firstQuery * 4,
1139 queryCount * 4, 0);
1140 }
1141
1142 if (flush_bits) {
1143 		/* We only need to flush caches for the compute shader path. */
1144 cmd_buffer->pending_reset_query = true;
1145 cmd_buffer->state.flush_bits |= flush_bits;
1146 }
1147 }
1148
1149 static unsigned event_type_for_stream(unsigned stream)
1150 {
1151 switch (stream) {
1152 default:
1153 case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
1154 case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
1155 case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
1156 case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
1157 }
1158 }
1159
1160 static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
1161 uint64_t va,
1162 VkQueryType query_type,
1163 VkQueryControlFlags flags,
1164 uint32_t index)
1165 {
1166 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1167 switch (query_type) {
1168 case VK_QUERY_TYPE_OCCLUSION:
1169 radeon_check_space(cmd_buffer->device->ws, cs, 7);
1170
1171 ++cmd_buffer->state.active_occlusion_queries;
1172 if (cmd_buffer->state.active_occlusion_queries == 1) {
1173 if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
1174 /* This is the first occlusion query, enable
1175 * the hint if the precision bit is set.
1176 */
1177 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1178 }
1179
1180 radv_set_db_count_control(cmd_buffer);
1181 } else {
1182 if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
1183 !cmd_buffer->state.perfect_occlusion_queries_enabled) {
1184 /* This is not the first query, but this one
1185 			 * needs to enable precision, so DB_COUNT_CONTROL
1186 			 * has to be updated accordingly.
1187 */
1188 cmd_buffer->state.perfect_occlusion_queries_enabled = true;
1189
1190 radv_set_db_count_control(cmd_buffer);
1191 }
1192 }
1193
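		/* ZPASS_DONE makes every render backend dump its Z-pass counter
		 * as a 64-bit value at va + 16 * rb; the matching end event in
		 * emit_end_query() uses va + 8, giving each backend a
		 * (start, end) pair.
		 */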
1194 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1195 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1196 radeon_emit(cs, va);
1197 radeon_emit(cs, va >> 32);
1198 break;
1199 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1200 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1201
1202 ++cmd_buffer->state.active_pipeline_queries;
1203 if (cmd_buffer->state.active_pipeline_queries == 1) {
1204 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1205 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
1206 }
1207
1208 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1209 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1210 radeon_emit(cs, va);
1211 radeon_emit(cs, va >> 32);
1212 break;
1213 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1214 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1215
1216 assert(index < MAX_SO_STREAMS);
1217
1218 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1219 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1220 radeon_emit(cs, va);
1221 radeon_emit(cs, va >> 32);
1222 break;
1223 default:
1224 unreachable("beginning unhandled query type");
1225 }
1226
1227 }
1228
1229 static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
1230 uint64_t va, uint64_t avail_va,
1231 VkQueryType query_type, uint32_t index)
1232 {
1233 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1234 switch (query_type) {
1235 case VK_QUERY_TYPE_OCCLUSION:
1236 radeon_check_space(cmd_buffer->device->ws, cs, 14);
1237
1238 cmd_buffer->state.active_occlusion_queries--;
1239 if (cmd_buffer->state.active_occlusion_queries == 0) {
1240 radv_set_db_count_control(cmd_buffer);
1241
1242 /* Reset the perfect occlusion queries hint now that no
1243 * queries are active.
1244 */
1245 cmd_buffer->state.perfect_occlusion_queries_enabled = false;
1246 }
1247
1248 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1249 radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1250 radeon_emit(cs, va + 8);
1251 radeon_emit(cs, (va + 8) >> 32);
1252
1253 break;
1254 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1255 radeon_check_space(cmd_buffer->device->ws, cs, 16);
1256
1257 cmd_buffer->state.active_pipeline_queries--;
1258 if (cmd_buffer->state.active_pipeline_queries == 0) {
1259 cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
1260 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
1261 }
1262 va += pipelinestat_block_size;
1263
1264 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1265 radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1266 radeon_emit(cs, va);
1267 radeon_emit(cs, va >> 32);
1268
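		/* Write the availability word with a bottom-of-pipe event so it
		 * only becomes 1 once the statistics sampled above have fully
		 * landed in memory.
		 */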
1269 si_cs_emit_write_event_eop(cs,
1270 cmd_buffer->device->physical_device->rad_info.chip_class,
1271 radv_cmd_buffer_uses_mec(cmd_buffer),
1272 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1273 EOP_DATA_SEL_VALUE_32BIT,
1274 avail_va, 0, 1,
1275 cmd_buffer->gfx9_eop_bug_va);
1276 break;
1277 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1278 radeon_check_space(cmd_buffer->device->ws, cs, 4);
1279
1280 assert(index < MAX_SO_STREAMS);
1281
1282 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1283 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
1284 radeon_emit(cs, (va + 16));
1285 radeon_emit(cs, (va + 16) >> 32);
1286 break;
1287 default:
1288 unreachable("ending unhandled query type");
1289 }
1290 }
1291
1292 void radv_CmdBeginQueryIndexedEXT(
1293 VkCommandBuffer commandBuffer,
1294 VkQueryPool queryPool,
1295 uint32_t query,
1296 VkQueryControlFlags flags,
1297 uint32_t index)
1298 {
1299 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1300 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1301 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1302 uint64_t va = radv_buffer_get_va(pool->bo);
1303
1304 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1305
1306 if (cmd_buffer->pending_reset_query) {
1307 if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
1308 			/* We only need to flush caches if the query pool size is
1309 			 * large enough to be reset using the compute shader
1310 			 * path. Small pools don't need any cache flushes
1311 			 * because we use a CP DMA clear.
1312 */
1313 si_emit_cache_flush(cmd_buffer);
1314 cmd_buffer->pending_reset_query = false;
1315 }
1316 }
1317
1318 va += pool->stride * query;
1319
1320 emit_begin_query(cmd_buffer, va, pool->type, flags, index);
1321 }
1322
1323 void radv_CmdBeginQuery(
1324 VkCommandBuffer commandBuffer,
1325 VkQueryPool queryPool,
1326 uint32_t query,
1327 VkQueryControlFlags flags)
1328 {
1329 radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
1330 }
1331
1332 void radv_CmdEndQueryIndexedEXT(
1333 VkCommandBuffer commandBuffer,
1334 VkQueryPool queryPool,
1335 uint32_t query,
1336 uint32_t index)
1337 {
1338 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1339 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1340 uint64_t va = radv_buffer_get_va(pool->bo);
1341 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1342 va += pool->stride * query;
1343
1344 	/* We do not need to add the pool BO to the list because the query must
1345 * currently be active, which means the BO is already in the list.
1346 */
1347 emit_end_query(cmd_buffer, va, avail_va, pool->type, index);
1348
1349 /*
1350 * For multiview we have to emit a query for each bit in the mask,
1351 * however the first query we emit will get the totals for all the
1352 * operations, so we don't want to get a real value in the other
1353 * queries. This emits a fake begin/end sequence so the waiting
1354 * code gets a completed query value and doesn't hang, but the
1355 * query returns 0.
1356 */
1357 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
1358 uint64_t avail_va = va + pool->availability_offset + 4 * query;
1359
1360
1361 for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
1362 va += pool->stride;
1363 avail_va += 4;
1364 emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
1365 emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
1366 }
1367 }
1368 }
1369
1370 void radv_CmdEndQuery(
1371 VkCommandBuffer commandBuffer,
1372 VkQueryPool queryPool,
1373 uint32_t query)
1374 {
1375 radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
1376 }
1377
1378 void radv_CmdWriteTimestamp(
1379 VkCommandBuffer commandBuffer,
1380 VkPipelineStageFlagBits pipelineStage,
1381 VkQueryPool queryPool,
1382 uint32_t query)
1383 {
1384 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1385 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1386 bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
1387 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1388 uint64_t va = radv_buffer_get_va(pool->bo);
1389 uint64_t query_va = va + pool->stride * query;
1390
1391 radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
1392
1393 int num_queries = 1;
1394 if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
1395 num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
1396
1397 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
1398
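	/* TOP_OF_PIPE_BIT is implemented as an immediate ME copy of the
	 * current GPU timestamp; every other stage conservatively falls back
	 * to a bottom-of-pipe EOP event.
	 */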
1399 for (unsigned i = 0; i < num_queries; i++) {
1400 switch(pipelineStage) {
1401 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1402 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1403 radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
1404 COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
1405 COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
1406 radeon_emit(cs, 0);
1407 radeon_emit(cs, 0);
1408 radeon_emit(cs, query_va);
1409 radeon_emit(cs, query_va >> 32);
1410 break;
1411 default:
1412 si_cs_emit_write_event_eop(cs,
1413 cmd_buffer->device->physical_device->rad_info.chip_class,
1414 mec,
1415 V_028A90_BOTTOM_OF_PIPE_TS, 0,
1416 EOP_DATA_SEL_TIMESTAMP,
1417 query_va, 0, 0,
1418 cmd_buffer->gfx9_eop_bug_va);
1419 break;
1420 }
1421 query_va += pool->stride;
1422 }
1423 assert(cmd_buffer->cs->cdw <= cdw_max);
1424 }