/*
 * Copyright © 2016 Red Hat Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"

#define TIMESTAMP_NOT_READY UINT64_MAX
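/* Timestamp slots are filled with this sentinel on reset; a slot only stops
 * comparing equal to TIMESTAMP_NOT_READY once the GPU has written the real
 * timestamp, which is what the availability checks below rely on.
 */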
static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
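/* Each pipeline-statistics snapshot is a block of eleven 64-bit counters
 * (pipelinestat_block_size bytes). This table maps bit i of
 * VkQueryPipelineStatisticFlagBits to the qword slot the hardware writes
 * that counter to, so results can be copied out in Vulkan bit order.
 */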
static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
	return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
}
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
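/* radv_break_on_count() emits, inside the current loop body, roughly:
 *
 *    if (counter >= count)
 *        break;
 *    counter++;
 *
 * and leaves the builder cursor after the increment, where the caller
 * continues emitting the loop body.
 */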
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}
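/* All three query shaders share one 16-byte push-constant block: flags at
 * offset 0, dst_stride at offset 4 and, for the pipeline-statistics shader,
 * stats_mask at 8 and avail_offset at 12. This matches the 16-byte
 * VkPushConstantRange declared for the meta pipeline layout below.
 */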
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result = 0;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	bool available = true;
	 * 	for (int i = 0; i < db_count; ++i) {
	 * 		if (enabled_rb_mask & (1 << i)) {
	 * 			uint64_t start = src_buf[src_offset + 16 * i];
	 * 			uint64_t end = src_buf[src_offset + 16 * i + 8];
	 * 			if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 * 				result += end - start;
	 * 			else
	 * 				available = false;
	 * 		}
	 * 	}
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT)
	 * 			dst_buf[dst_offset] = result;
	 * 		else
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
	unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
	unsigned db_count = device->physical_device->rad_info.num_render_backends;

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);
	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *enabled_cond =
		nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
			 nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));

	nir_if *enabled_if = nir_if_create(b.shader);
	enabled_if->condition = nir_src_for_ssa(nir_i2b(&b, enabled_cond));
	nir_cf_node_insert(b.cursor, &enabled_if->cf_node);

	b.cursor = nir_after_cf_list(&enabled_if->then_list);

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
	nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);

	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
		      nir_iadd(&b, nir_load_var(&b, result),
			       nir_isub(&b, nir_load_var(&b, end),
					nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);
	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}
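/* A pipeline-statistics query slot holds two pipelinestat blocks: the begin
 * snapshot at offset 0 and the end snapshot at pipelinestat_block_size.
 * A separate 32-bit availability word per query lives at avail_offset and is
 * written by the EOP event emitted in emit_end_query().
 */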
static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * 	uint32_t stats_mask;
	 * 	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_base = dst_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_base;
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	uint32_t elem_count = stats_mask >> 16;
	 * 	uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_count * elem_size] = available32;
	 * 	}
	 * 	if ((bool)available32) {
	 * 		// repeat 11 times:
	 * 		if (stats_mask & (1 << 0)) {
	 * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 * 			uint64_t result = end - start;
	 * 			if (flags & VK_QUERY_RESULT_64_BIT)
	 * 				dst_buf[dst_offset] = result;
	 * 			else
	 * 				dst_buf[dst_offset] = (uint32_t)result;
	 * 			dst_offset += elem_size;
	 * 		}
	 * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 * 		// Set everything to 0 as we don't know what is valid.
	 * 		for (int i = 0; i < elem_count; ++i)
	 * 			dst_buf[dst_base + elem_size * i] = 0;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
				nir_imul(&b, global_id, nir_imm_int(&b, 4)));
	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available32 = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available32);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);
	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
			      nir_iadd(&b, nir_load_var(&b, output_offset),
				       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}
	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
					    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);
	return b.shader;
}
static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
	/* the shader this builds is roughly
	 *
	 * uint32_t src_stride = 32;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result[2] = {};
	 * 	bool available = false;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	uint64_t *src_data = src_buf[src_offset];
	 * 	uint32_t avail = (src_data[0] >> 32) &
	 * 			 (src_data[1] >> 32) &
	 * 			 (src_data[2] >> 32) &
	 * 			 (src_data[3] >> 32);
	 * 	if (avail & 0x80000000) {
	 * 		result[0] = src_data[3] - src_data[1];
	 * 		result[1] = src_data[2] - src_data[0];
	 * 		available = true;
	 * 	}
	 * 	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT) {
	 * 			dst_buf[dst_offset] = result;
	 * 		} else {
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 		}
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + result_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	/* Create and initialize local variables. */
	nir_variable *result =
		nir_local_variable_create(b.impl,
					  glsl_vector_type(GLSL_TYPE_UINT64, 2),
					  "result");
	nir_variable *available =
		nir_local_variable_create(b.impl, glsl_bool_type(), "available");

	nir_store_var(&b, result,
		      nir_vec2(&b, nir_imm_int64(&b, 0),
			       nir_imm_int64(&b, 0)), 0x3);
	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	/* Load resources. */
	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	/* Compute global ID. */
	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	/* Compute src/dst strides. */
	nir_ssa_def *input_stride = nir_imm_int(&b, 32);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
	/* Load data from the query pool. */
	nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load1->src[1] = nir_src_for_ssa(input_base);
	nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
	load1->num_components = 4;
	nir_builder_instr_insert(&b, &load1->instr);

	nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
	nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
	load2->num_components = 4;
	nir_builder_instr_insert(&b, &load2->instr);

	/* Check if result is available. */
	nir_ssa_def *avails[2];
	avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
			     nir_channel(&b, &load1->dest.ssa, 3));
	avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
			     nir_channel(&b, &load2->dest.ssa, 3));
	nir_ssa_def *result_is_available =
		nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
				     nir_imm_int(&b, 0x80000000)));
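	/* The counters are loaded as two uvec4s of 32-bit words rather than
	 * 64-bit values so that the high dword of each counter (which carries
	 * the "written" flag in bit 31) can be tested before the halves are
	 * re-packed with nir_pack_64_2x32 below.
	 */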
	/* Only compute result if available. */
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(result_is_available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_ssa_def *packed64[4];
	packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 0),
						    nir_channel(&b, &load1->dest.ssa, 1)));
	packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 2),
						    nir_channel(&b, &load1->dest.ssa, 3)));
	packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 0),
						    nir_channel(&b, &load2->dest.ssa, 1)));
	packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 2),
						    nir_channel(&b, &load2->dest.ssa, 3)));

	/* Compute result. */
	nir_ssa_def *num_primitive_written =
		nir_isub(&b, packed64[3], packed64[1]);
	nir_ssa_def *primitive_storage_needed =
		nir_isub(&b, packed64[2], packed64[0]);

	nir_store_var(&b, result,
		      nir_vec2(&b, num_primitive_written,
			       primitive_storage_needed), 0x3);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	b.cursor = nir_after_cf_node(&available_if->cf_node);
	/* Determine if result is 64 or 32 bit. */
	nir_ssa_def *result_is_64bit =
		nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size =
		nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
			  nir_imm_int(&b, 8));

	/* Store the result if complete or partial results have been requested. */
	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition =
		nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
					nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

	b.cursor = nir_after_cf_node(&store_if->cf_node);
	/* Store the availability bit if requested. */
	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition =
		nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	return b.shader;
}
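/* The three compute pipelines below are created once per device, guarded by
 * meta_state.mtx; with on-demand meta initialization they are instead
 * compiled on first use from radv_query_shader().
 */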
static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
	struct radv_shader_module tfb_cs = { .nir = NULL };
	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.query.pipeline_statistics_query_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
	tfb_cs.nir = build_tfb_query_shader(device);
	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&tfb_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo tfb_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = tfb_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &tfb_pipeline_info, NULL,
					     &device->meta_state.query.tfb_query_pipeline);
fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	ralloc_free(tfb_cs.nir);
	mtx_unlock(&device->meta_state.mtx);
	return result;
}
VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_query_state_internal(device);
}
void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.tfb_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.tfb_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline *pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	bool old_predicating;

	if (!*pipeline) {
		VkResult ret = radv_device_init_meta_query_state_internal(device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_COMPUTE_PIPELINE |
		       RADV_META_SAVE_CONSTANTS |
		       RADV_META_SAVE_DESCRIPTORS);
	/* VK_EXT_conditional_rendering says that copy commands should not be
	 * affected by conditional rendering.
	 */
	old_predicating = cmd_buffer->state.predicating;
	cmd_buffer->state.predicating = false;

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};
	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });
	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
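	/* E.g. a mask with two statistic bits set (0x3) becomes 0x00020003:
	 * the low 11 bits still select the counters to copy and bits 16+
	 * give the element count the shader must write.
	 */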
	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 |
					RADV_CMD_FLAG_INV_VCACHE;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	/* Restore conditional rendering. */
	cmd_buffer->state.predicating = old_predicating;

	radv_meta_restore(&saved_state, cmd_buffer);
}
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		pool->stride = 32;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	pool->size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		pool->size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, pool->size,
					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
					     RADV_BO_PRIORITY_QUERY_POOL);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}
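/* Query pool memory layout: queryCount slots of pool->stride bytes, followed
 * (for pipeline statistics pools only) by one 32-bit availability word per
 * query starting at availability_offset.
 */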
void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;
		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			available = *(uint64_t *)src != TIMESTAMP_NOT_READY;

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
					;
				available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint32_t db_count = device->physical_device->rad_info.num_render_backends;
			uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
			uint64_t sample_count = 0;
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;

				if (!(enabled_rb_mask & (1 << i)))
					continue;

				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i) {
					if(pool->pipeline_stats_mask & (1u << i)) {
						if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
							*dst = stop[pipeline_statistics_indices[i]] -
							       start[pipeline_statistics_indices[i]];
						dst++;
					}
				}
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i) {
					if(pool->pipeline_stats_mask & (1u << i)) {
						if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
							*dst = stop[pipeline_statistics_indices[i]] -
							       start[pipeline_statistics_indices[i]];
						dst++;
					}
				}
			}
			break;
		}
		case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t num_primitives_written;
			uint64_t primitive_storage_needed;

			/* SAMPLE_STREAMOUTSTATS stores this structure:
			 * {
			 *	u64 NumPrimitivesWritten;
			 *	u64 PrimitiveStorageNeeded;
			 * }
			 */
			available = 1;
			for (int j = 0; j < 4; j++) {
				if (!(src64[j] & 0x8000000000000000UL))
					available = 0;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			num_primitives_written = src64[3] - src64[1];
			primitive_storage_needed = src64[2] - src64[0];

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t *)dest = num_primitives_written;
				dest += 8;
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t *)dest = primitive_storage_needed;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t *)dest = num_primitives_written;
				dest += 4;
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t *)dest = primitive_storage_needed;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}
static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_query_pool *pool)
{
	if (cmd_buffer->pending_reset_query) {
		if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
			/* Only need to flush caches if the query pool size is
			 * large enough to be reset using the compute shader
			 * path. Small pools don't need any cache flushes
			 * because we use a CP dma clear.
			 */
			si_emit_cache_flush(cmd_buffer);
		}
	}
}
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

	/* From the Vulkan spec 1.1.108:
	 *
	 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
	 *  previous uses of vkCmdResetQueryPool in the same queue, without any
	 *  additional synchronization."
	 *
	 * So, we have to flush the caches if the compute shader path was used.
	 */
	emit_query_flush(cmd_buffer, pool);
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				/* Waits on the upper word of the last DB entry */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
						 src_va, 0x80000000, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
						 avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* Wait on the high 32 bits of the timestamp in
				 * case the low part is 0xffffffff.
				 */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
						 local_src_va + 4,
						 TIMESTAMP_NOT_READY >> 32,
						 0xffffffff);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
				radeon_emit(cs, local_src_va);
				radeon_emit(cs, local_src_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; i++) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride;

				radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

				/* Wait on the upper word of all results. */
				for (unsigned j = 0; j < 4; j++, src_va += 8) {
					radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
							 src_va + 4, 0x80000000,
							 0xffffffff);
				}
			}
		}

		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
				  pool->bo, dst_buffer->bo,
				  firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}
void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? TIMESTAMP_NOT_READY : 0;
	uint32_t flush_bits = 0;

	/* Make sure to sync all previous work if the given command buffer has
	 * pending active queries. Otherwise the GPU might write query data
	 * after the reset operation.
	 */
	cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
				       firstQuery * pool->stride,
				       queryCount * pool->stride, value);

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
					       pool->availability_offset + firstQuery * 4,
					       queryCount * 4, 0);
	}

	if (flush_bits) {
		/* Only need to flush caches for the compute shader path. */
		cmd_buffer->pending_reset_query = true;
		cmd_buffer->state.flush_bits |= flush_bits;
	}
}
void radv_ResetQueryPoolEXT(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);

	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? TIMESTAMP_NOT_READY : 0;
	uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
	uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);

	for(uint32_t *p = data; p != data_end; ++p)
		*p = value;

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		memset(pool->ptr + pool->availability_offset + firstQuery * 4,
		       0, queryCount * 4);
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
			     uint64_t va,
			     VkQueryType query_type,
			     VkQueryControlFlags flags,
			     uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1) {
			if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
				/* This is the first occlusion query, enable
				 * the hint if the precision bit is set.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;
			}

			radv_set_db_count_control(cmd_buffer);
		} else {
			if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
			    !cmd_buffer->state.perfect_occlusion_queries_enabled) {
				/* This is not the first query, but this one
				 * needs to enable precision, DB_COUNT_CONTROL
				 * has to be updated accordingly.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;

				radv_set_db_count_control(cmd_buffer);
			}
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		++cmd_buffer->state.active_pipeline_queries;
		if (cmd_buffer->state.active_pipeline_queries == 1) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
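/* emit_begin_query() writes the first snapshot of a slot and
 * emit_end_query() the second: the ZPASS_DONE begin/end counters sit at
 * va + 16 * rb and va + 16 * rb + 8 for each render backend, while the two
 * 16-byte SAMPLE_STREAMOUTSTATS snapshots sit at va and va + 16.
 */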
static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t va, uint64_t avail_va,
			   VkQueryType query_type, uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0) {
			radv_set_db_count_control(cmd_buffer);

			/* Reset the perfect occlusion queries hint now that no
			 * queries are active.
			 */
			cmd_buffer->state.perfect_occlusion_queries_enabled = false;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		cmd_buffer->state.active_pipeline_queries--;
		if (cmd_buffer->state.active_pipeline_queries == 0) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
		}
		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DST_SEL_MEM,
					   EOP_DATA_SEL_VALUE_32BIT,
					   avail_va, 1,
					   cmd_buffer->gfx9_eop_bug_va);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, (va + 16));
		radeon_emit(cs, (va + 16) >> 32);
		break;
	default:
		unreachable("ending unhandled query type");
	}

	cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_INV_L2 |
					       RADV_CMD_FLAG_INV_VCACHE;
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						       RADV_CMD_FLAG_FLUSH_AND_INV_DB;
	}
}

void radv_CmdBeginQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	va += pool->stride * query;

	emit_begin_query(cmd_buffer, va, pool->type, flags, index);
}

void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

void radv_CmdEndQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Do not need to add the pool BO to the list because the query must
	 * currently be active, which means the BO is already in the list.
	 */
	emit_end_query(cmd_buffer, va, avail_va, pool->type, index);

	/*
	 * For multiview we have to emit a query for each bit in the mask,
	 * however the first query we emit will get the totals for all the
	 * operations, so we don't want to get a real value in the other
	 * queries. This emits a fake begin/end sequence so the waiting
	 * code gets a completed query value and doesn't hang, but the
	 * query returns 0.
	 */
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
		uint64_t avail_va = va + pool->availability_offset + 4 * query;

		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
			va += pool->stride;
			avail_va += 4;

			emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
			emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
		}
	}
}

void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}

void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t query_va = va + pool->stride * query;

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	int num_queries = 1;
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
		num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

	for (unsigned i = 0; i < num_queries; i++) {
		switch(pipelineStage) {
		case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
					COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
					COPY_DATA_DST_SEL(V_370_MEM));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, query_va);
			radeon_emit(cs, query_va >> 32);
			break;
		default:
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   mec,
						   V_028A90_BOTTOM_OF_PIPE_TS, 0,
						   EOP_DST_SEL_MEM,
						   EOP_DATA_SEL_TIMESTAMP,
						   query_va, 0,
						   cmd_buffer->gfx9_eop_bug_va);
			break;
		}
		query_va += pool->stride;
	}
	assert(cmd_buffer->cs->cdw <= cdw_max);
}