radv: fewer than 8 RBs are possible
[mesa.git] src/amd/vulkan/radv_query.c
/*
 * Copyright 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31
32 #include "nir/nir_builder.h"
33 #include "radv_meta.h"
34 #include "radv_private.h"
35 #include "radv_cs.h"
36 #include "sid.h"
37
38
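/* Each SAMPLE_PIPELINESTAT event writes a block of 11 64-bit counters.
 * pipeline_statistics_indices maps each VkQueryPipelineStatisticFlagBits
 * bit position to that counter's index within the hardware block. */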
static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};

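/* Returns the number of DBs (render backends) for which an occlusion query
 * slot holds begin/end pairs. SI keeps the full count of 8. A disabled RB
 * would never set bit 63 in its zero-initialized pair, so its query would
 * never become available; hence the dense rb_mask assertion below. */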
static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	if (device->physical_device->rad_info.chip_class == SI)
		num_db = 8;

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}

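/* Emits NIR for "if (var >= count) break; var++;", the loop exit test
 * used by the query resolve shaders below. */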
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}

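/* Loads a single 32-bit value from the 16-byte push constant block at the
 * given byte offset. */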
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}

static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *     uint32_t flags;
	 *     uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * layout(binding = 0) buffer dst_buf;
	 * layout(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *     uint64_t result = 0;
	 *     uint64_t src_offset = src_stride * global_id.x;
	 *     uint64_t dst_offset = dst_stride * global_id.x;
	 *     bool available = true;
	 *     for (int i = 0; i < db_count; ++i) {
	 *         uint64_t start = src_buf[src_offset + 16 * i];
	 *         uint64_t end = src_buf[src_offset + 16 * i + 8];
	 *         if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 *             result += end - start;
	 *         else
	 *             available = false;
	 *     }
	 *     uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *     if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *         if (flags & VK_QUERY_RESULT_64_BIT)
	 *             dst_buf[dst_offset] = result;
	 *         else
	 *             dst_buf[dst_offset] = (uint32_t)result;
	 *     }
	 *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *         dst_buf[dst_offset + elem_size] = available;
	 *     }
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	const unsigned swizzle0[] = {0,0,0,0};
	const unsigned swizzle1[] = {1,1,1,1};
	nir_store_var(&b, start, nir_swizzle(&b, &load->dest.ssa, swizzle0, 1, false), 0x1);
	nir_store_var(&b, end, nir_swizzle(&b, &load->dest.ssa, swizzle1, 1, false), 0x1);

	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
		      nir_iadd(&b, nir_load_var(&b, result),
			       nir_isub(&b, nir_load_var(&b, end),
					nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
						nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}

static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *     uint32_t flags;
	 *     uint32_t dst_stride;
	 *     uint32_t stats_mask;
	 *     uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * layout(binding = 0) buffer dst_buf;
	 * layout(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *     uint64_t src_offset = src_stride * global_id.x;
	 *     uint64_t dst_base = dst_stride * global_id.x;
	 *     uint64_t dst_offset = dst_base;
	 *     uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *     uint32_t elem_count = stats_mask >> 16;
	 *     uint32_t available = src_buf[avail_offset + 4 * global_id.x];
	 *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *         dst_buf[dst_offset + elem_count * elem_size] = available;
	 *     }
	 *     if (available) {
	 *         // repeat 11 times:
	 *         if (stats_mask & (1 << 0)) {
	 *             uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 *             uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 *             uint64_t result = end - start;
	 *             if (flags & VK_QUERY_RESULT_64_BIT)
	 *                 dst_buf[dst_offset] = result;
	 *             else
	 *                 dst_buf[dst_offset] = (uint32_t)result;
	 *             dst_offset += elem_size;
	 *         }
	 *     } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 *         // Set everything to 0 as we don't know what is valid.
	 *         for (int i = 0; i < elem_count; ++i)
	 *             dst_buf[dst_base + elem_size * i] = 0;
	 *     }
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
				nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
						nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		/* Store result */
		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
			      nir_iadd(&b, nir_load_var(&b, output_offset),
				       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}

	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
							   nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
					    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);
	return b.shader;
}

VkResult radv_device_init_meta_query_state(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };

	zero(device->meta_state.query);

	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	return result;
}

void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}

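/* Dispatches one of the meta compute pipelines above, one thread per query,
 * to resolve 'count' results from src_bo into dst_bo according to the
 * VK_QUERY_RESULT_* flags. pipeline_stats_mask and avail_offset are only
 * meaningful for the pipeline statistics pipeline. */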
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_compute_state saved_state;

	radv_meta_save_compute(&saved_state, cmd_buffer, 4);

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });

	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;

	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

	radv_meta_restore_compute(&saved_state, cmd_buffer, 4);
}

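/* Query pool BO layout: queryCount slots of pool->stride bytes each,
 * followed, for timestamp and pipeline statistics pools, by one 32-bit
 * availability word per query. */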
VkResult radv_CreateQueryPool(
	VkDevice _device,
	const VkQueryPoolCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkQueryPool* pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint64_t size;
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP ||
	    pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, size,
					     64, RADEON_DOMAIN_GTT, 0);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	memset(pool->ptr, 0, size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyQueryPool(
	VkDevice _device,
	VkQueryPool _pool,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_GetQueryPoolResults(
	VkDevice _device,
	VkQueryPool queryPool,
	uint32_t firstQuery,
	uint32_t queryCount,
	size_t dataSize,
	void* pData,
	VkDeviceSize stride,
	VkQueryResultFlags flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		if (pool->type != VK_QUERY_TYPE_OCCLUSION) {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
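			/* Occlusion results have no separate availability word;
			 * each 64-bit begin/end value gets bit 63 set once the
			 * DB has written it. */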
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;
				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else {
					sample_count += end - start;
				}
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}

void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer commandBuffer,
	VkQueryPool queryPool,
	uint32_t firstQuery,
	uint32_t queryCount,
	VkBuffer dstBuffer,
	VkDeviceSize dstOffset,
	VkDeviceSize stride,
	VkQueryResultFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				/* Waits on the upper word of the last DB entry */
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
				radeon_emit(cs, 5 | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, src_va);
				radeon_emit(cs, src_va >> 32);
				radeon_emit(cs, 0x80000000); /* reference value */
				radeon_emit(cs, 0xffffffff); /* mask */
				radeon_emit(cs, 4); /* poll interval */
			}
		}
		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  get_max_db(cmd_buffer->device) * 16, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pipelinestat_block_size * 2, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* TODO, not sure if there is any case where we won't always be ready yet */
				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_va = va + pool->availability_offset + 4 * query;
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_MEM));
				radeon_emit(cs, avail_va);
				radeon_emit(cs, avail_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	default:
		unreachable("trying to copy results of unhandled query type");
	}
}

void radv_CmdResetQueryPool(
	VkCommandBuffer commandBuffer,
	VkQueryPool queryPool,
	uint32_t firstQuery,
	uint32_t queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);

	si_cp_dma_clear_buffer(cmd_buffer, va + firstQuery * pool->stride,
			       queryCount * pool->stride, 0);
	if (pool->type == VK_QUERY_TYPE_TIMESTAMP ||
	    pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		si_cp_dma_clear_buffer(cmd_buffer, va + pool->availability_offset + firstQuery * 4,
				       queryCount * 4, 0);
}

void radv_CmdBeginQuery(
	VkCommandBuffer commandBuffer,
	VkQueryPool queryPool,
	uint32_t query,
	VkQueryControlFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1)
			radv_set_db_count_control(cmd_buffer);

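		/* ZPASS_DONE writes the begin sample count for each RB, one
		 * 64-bit value per 16-byte slot starting at 'va'. */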
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}

void radv_CmdEndQuery(
	VkCommandBuffer commandBuffer,
	VkQueryPool queryPool,
	uint32_t query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);

		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

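		/* Mark the query available once the bottom-of-pipe event
		 * retires by writing 1 to its availability word. */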
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   false,
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   1, avail_va, 0, 1);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}

void radv_CmdWriteTimestamp(
	VkCommandBuffer commandBuffer,
	VkPipelineStageFlagBits pipelineStage,
	VkQueryPool queryPool,
	uint32_t query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	uint64_t query_va = va + pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28);

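	/* TOP_OF_PIPE copies the current GPU timestamp on the ME via
	 * COPY_DATA rather than using an EOP event, so availability can be
	 * written immediately afterwards; every other stage uses a
	 * bottom-of-pipe timestamp. */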
	switch(pipelineStage) {
	case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
				COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, query_va);
		radeon_emit(cs, query_va >> 32);

		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_ME));
		radeon_emit(cs, avail_va);
		radeon_emit(cs, avail_va >> 32);
		radeon_emit(cs, 1);
		break;
	default:
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   mec,
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   3, query_va, 0, 0);
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   mec,
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   1, avail_va, 0, 1);
		break;
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}