/*
 * Copyright 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"
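
/* Timestamp slots are initialized to this sentinel; the GPU overwrites it
 * with the real tick count, so any other value means the timestamp has
 * landed. */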
#define TIMESTAMP_NOT_READY UINT64_MAX

static const int pipelinestat_block_size = 11 * 8;
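/* Maps the VkQueryPipelineStatisticFlagBits bit order onto the slot order in
 * which the hardware writes the eleven counters of a SAMPLE_PIPELINESTAT
 * block. */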
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};

static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}

static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
	return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
}
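
/* Emits the NIR equivalent of "if (counter >= count) break; counter++;" at
 * the current cursor; used to bound the result-copy loops in the shaders
 * below. */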
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}

static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}
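
/* All three resolve shaders share one 16-byte push constant block:
 * flags at offset 0, dst_stride at offset 4 and, for pipeline statistics
 * only, stats_mask at 8 and avail_offset at 12. */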
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result = 0;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	bool available = true;
	 * 	for (int i = 0; i < db_count; ++i) {
	 * 		uint64_t start = src_buf[src_offset + 16 * i];
	 * 		uint64_t end = src_buf[src_offset + 16 * i + 8];
	 * 		if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 * 			result += end - start;
	 * 		else
	 * 			available = false;
	 * 	}
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT)
	 * 			dst_buf[dst_offset] = result;
	 * 		else
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
	nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);
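
	/* ZPASS_DONE writes each (begin, end) pair with bit 63 set once it is
	 * valid, so the signed "less than zero" compares below are really
	 * bit-63 tests. */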
	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
		      nir_iadd(&b, nir_load_var(&b, result),
			       nir_isub(&b, nir_load_var(&b, end),
					nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}

static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * 	uint32_t stats_mask;
	 * 	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_base = dst_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_base;
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	uint32_t elem_count = stats_mask >> 16;
	 * 	uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_count * elem_size] = available32;
	 * 	}
	 * 	if ((bool)available32) {
	 * 		// repeat 11 times:
	 * 		if (stats_mask & (1 << 0)) {
	 * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 * 			uint64_t result = end - start;
	 * 			if (flags & VK_QUERY_RESULT_64_BIT)
	 * 				dst_buf[dst_offset] = result;
	 * 			else
	 * 				dst_buf[dst_offset] = (uint32_t)result;
	 * 			dst_offset += elem_size;
	 * 		}
	 * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 * 		// Set everything to 0 as we don't know what is valid.
	 * 		for (int i = 0; i < elem_count; ++i)
	 * 			dst_buf[dst_base + elem_size * i] = 0;
	 * 	}
	 * }
	 */
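	/* stats_mask arrives with the enabled-counter count packed into its
	 * upper 16 bits (see radv_query_shader), so elem_count is a single
	 * shift. */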
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
				nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available32 = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available32);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
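
	/* Emit one guarded copy per possible pipeline statistic, in Vulkan bit
	 * order; pipeline_statistics_indices remaps each bit to its hardware
	 * slot. */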
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
			      nir_iadd(&b, nir_load_var(&b, output_offset),
				       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}

	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Store zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
					    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);

	return b.shader;
}

static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
	/* the shader this builds is roughly
	 *
	 * uint32_t src_stride = 32;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t result[2] = {};
	 *	bool available = false;
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_offset = dst_stride * global_id.x;
	 *	uint64_t *src_data = src_buf[src_offset];
	 *	uint32_t avail = (src_data[0] >> 32) &
	 *			 (src_data[1] >> 32) &
	 *			 (src_data[2] >> 32) &
	 *			 (src_data[3] >> 32);
	 *	if (avail & 0x80000000) {
	 *		result[0] = src_data[3] - src_data[1];
	 *		result[1] = src_data[2] - src_data[0];
	 *		available = true;
	 *	}
	 *	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
	 *	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *		if (flags & VK_QUERY_RESULT_64_BIT) {
	 *			dst_buf[dst_offset] = result;
	 *		} else {
	 *			dst_buf[dst_offset] = (uint32_t)result;
	 *		}
	 *	}
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + result_size] = available;
	 *	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	/* Create and initialize local variables. */
	nir_variable *result =
		nir_local_variable_create(b.impl,
					  glsl_vector_type(GLSL_TYPE_UINT64, 2),
					  "result");
	nir_variable *available =
		nir_local_variable_create(b.impl, glsl_bool_type(), "available");

	nir_store_var(&b, result,
		      nir_vec2(&b, nir_imm_int64(&b, 0),
			       nir_imm_int64(&b, 0)), 0x3);
	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	/* Load resources. */
	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	/* Compute global ID. */
	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	/* Compute src/dst strides. */
	nir_ssa_def *input_stride = nir_imm_int(&b, 32);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	/* Load data from the query pool. */
	nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load1->src[1] = nir_src_for_ssa(input_base);
	nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
	load1->num_components = 4;
	nir_builder_instr_insert(&b, &load1->instr);

	nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
	nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
	load2->num_components = 4;
	nir_builder_instr_insert(&b, &load2->instr);

	/* Check if result is available. */
	nir_ssa_def *avails[2];
	avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
			     nir_channel(&b, &load1->dest.ssa, 3));
	avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
			     nir_channel(&b, &load2->dest.ssa, 3));
	nir_ssa_def *result_is_available =
		nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
				     nir_imm_int(&b, 0x80000000)));
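
	/* The odd channels hold the high dwords of the four streamout counters;
	 * the hardware sets their top bit once a counter has been written, so
	 * AND-ing them and masking 0x80000000 means "all four results landed". */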
	/* Only compute result if available. */
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(result_is_available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Pack the 32-bit loads into 64-bit values. */
	nir_ssa_def *packed64[4];
	packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 0),
						    nir_channel(&b, &load1->dest.ssa, 1)));
	packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 2),
						    nir_channel(&b, &load1->dest.ssa, 3)));
	packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 0),
						    nir_channel(&b, &load2->dest.ssa, 1)));
	packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 2),
						    nir_channel(&b, &load2->dest.ssa, 3)));

	/* Compute result. */
	nir_ssa_def *num_primitive_written =
		nir_isub(&b, packed64[3], packed64[1]);
	nir_ssa_def *primitive_storage_needed =
		nir_isub(&b, packed64[2], packed64[0]);

	nir_store_var(&b, result,
		      nir_vec2(&b, num_primitive_written,
			       primitive_storage_needed), 0x3);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	b.cursor = nir_after_cf_node(&available_if->cf_node);

	/* Determine if result is 64 or 32 bit. */
	nir_ssa_def *result_is_64bit =
		nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size =
		nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
			  nir_imm_int(&b, 8));

	/* Store the result if complete or partial results have been requested. */
	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition =
		nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
					nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */
	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition =
		nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	return b.shader;
}

static VkResult
radv_device_init_meta_query_state_internal(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
	struct radv_shader_module tfb_cs = { .nir = NULL };

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.query.pipeline_statistics_query_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
	tfb_cs.nir = build_tfb_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&tfb_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo tfb_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = tfb_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &tfb_pipeline_info, NULL,
					     &device->meta_state.query.tfb_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	ralloc_free(tfb_cs.nir);
	mtx_unlock(&device->meta_state.mtx);
	return result;
}

VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_query_state_internal(device);
}

void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.tfb_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.tfb_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
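
/* Common tail for vkCmdCopyQueryPoolResults: binds one of the meta compute
 * pipelines built above and dispatches one thread per query to resolve raw
 * pool data into the caller's destination buffer. */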
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline *pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	bool old_predicating;

	if (!*pipeline) {
		VkResult ret = radv_device_init_meta_query_state_internal(device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_COMPUTE_PIPELINE |
		       RADV_META_SAVE_CONSTANTS |
		       RADV_META_SAVE_DESCRIPTORS);

	/* VK_EXT_conditional_rendering says that copy commands should not be
	 * affected by conditional rendering.
	 */
	old_predicating = cmd_buffer->state.predicating;
	cmd_buffer->state.predicating = false;

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });

	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;

	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	/* Restore conditional rendering. */
	cmd_buffer->state.predicating = old_predicating;

	radv_meta_restore(&saved_state, cmd_buffer);
}

VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
				 ? TIMESTAMP_NOT_READY : 0;

	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		pool->stride = 32;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	pool->size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		pool->size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, pool->size,
					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
					     RADV_BO_PRIORITY_QUERY_POOL);
	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pool->ptr = device->ws->buffer_map(pool->bo);
	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
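
	/* memset() writes initial_value byte-wise: 0xFF bytes make every
	 * timestamp slot read back as TIMESTAMP_NOT_READY, while all other
	 * pool types start zeroed. */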
	memset(pool->ptr, initial_value, pool->size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			available = *(uint64_t *)src != TIMESTAMP_NOT_READY;

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
					;
				available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);

			available = 1;
			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;

				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			}
			break;
		}
		case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t num_primitives_written;
			uint64_t primitive_storage_needed;

			/* SAMPLE_STREAMOUTSTATS stores this structure:
			 * {
			 *	u64 NumPrimitivesWritten;
			 *	u64 PrimitiveStorageNeeded;
			 * }
			 */
			available = 1;
			for (int j = 0; j < 4; j++) {
				if (!(src64[j] & 0x8000000000000000UL))
					available = 0;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			num_primitives_written = src64[3] - src64[1];
			primitive_storage_needed = src64[2] - src64[0];

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t *)dest = num_primitives_written;
				dest += 8;
				*(uint64_t *)dest = primitive_storage_needed;
				dest += 8;
			} else {
				*(uint32_t *)dest = num_primitives_written;
				dest += 4;
				*(uint32_t *)dest = primitive_storage_needed;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}

void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				/* Waits on the upper word of the last DB entry */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
						 src_va, 0x80000000, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
						 avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* Wait on the high 32 bits of the timestamp in
				 * case the low part is 0xffffffff.
				 */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
						 local_src_va + 4,
						 TIMESTAMP_NOT_READY >> 32,
						 0xffffffff);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
				radeon_emit(cs, local_src_va);
				radeon_emit(cs, local_src_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; i++) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride;

				radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

				/* Wait on the upper word of all results. */
				for (unsigned j = 0; j < 4; j++, src_va += 8) {
					radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
							 src_va + 4, 0x80000000,
							 0xffffffff);
				}
			}
		}

		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
				  pool->bo, dst_buffer->bo,
				  firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}

void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? TIMESTAMP_NOT_READY : 0;
	uint32_t flush_bits = 0;

	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
				       firstQuery * pool->stride,
				       queryCount * pool->stride, value);

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
					       pool->availability_offset + firstQuery * 4,
					       queryCount * 4, 0);
	}

	if (flush_bits) {
		/* Only need to flush caches for the compute shader path. */
		cmd_buffer->pending_reset_query = true;
		cmd_buffer->state.flush_bits |= flush_bits;
	}
}

static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}

static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_query_pool *pool)
{
	if (cmd_buffer->pending_reset_query) {
		if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
			/* Only need to flush caches if the query pool size is
			 * large enough to be reset using the compute shader
			 * path. Small pools don't need any cache flushes
			 * because we use a CP dma clear.
			 */
			si_emit_cache_flush(cmd_buffer);
		}
	}
}

static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
			     uint64_t va,
			     VkQueryType query_type,
			     VkQueryControlFlags flags,
			     uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1) {
			if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
				/* This is the first occlusion query, enable
				 * the hint if the precision bit is set.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;
			}

			radv_set_db_count_control(cmd_buffer);
		} else {
			if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
			    !cmd_buffer->state.perfect_occlusion_queries_enabled) {
				/* This is not the first query, but this one
				 * needs to enable precision, DB_COUNT_CONTROL
				 * has to be updated accordingly.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;

				radv_set_db_count_control(cmd_buffer);
			}
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		++cmd_buffer->state.active_pipeline_queries;
		if (cmd_buffer->state.active_pipeline_queries == 1) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}

static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t va, uint64_t avail_va,
			   VkQueryType query_type, uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0) {
			radv_set_db_count_control(cmd_buffer);

			/* Reset the perfect occlusion queries hint now that no
			 * queries are active.
			 */
			cmd_buffer->state.perfect_occlusion_queries_enabled = false;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		cmd_buffer->state.active_pipeline_queries--;
		if (cmd_buffer->state.active_pipeline_queries == 0) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
		}
		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DATA_SEL_VALUE_32BIT,
					   avail_va, 1,
					   cmd_buffer->gfx9_eop_bug_va);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, (va + 16));
		radeon_emit(cs, (va + 16) >> 32);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}

void radv_CmdBeginQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	va += pool->stride * query;

	emit_begin_query(cmd_buffer, va, pool->type, flags, index);
}

void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

void radv_CmdEndQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Do not need to add the pool BO to the list because the query must
	 * currently be active, which means the BO is already in the list.
	 */
	emit_end_query(cmd_buffer, va, avail_va, pool->type, index);

	/*
	 * For multiview we have to emit a query for each bit in the mask,
	 * however the first query we emit will get the totals for all the
	 * operations, so we don't want to get a real value in the other
	 * queries. This emits a fake begin/end sequence so the waiting
	 * code gets a completed query value and doesn't hang, but the
	 * query returns 0.
	 */
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
		uint64_t avail_va = va + pool->availability_offset + 4 * query;

		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
			va += pool->stride;
			avail_va += 4;
			emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
			emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
		}
	}
}

void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}

void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t query_va = va + pool->stride * query;

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	int num_queries = 1;
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
		num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

	for (unsigned i = 0; i < num_queries; i++) {
		switch(pipelineStage) {
		case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
				    COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				    COPY_DATA_DST_SEL(V_370_MEM));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, query_va);
			radeon_emit(cs, query_va >> 32);
			break;
		default:
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   mec,
						   V_028A90_BOTTOM_OF_PIPE_TS, 0,
						   EOP_DATA_SEL_TIMESTAMP,
						   query_va, 0,
						   cmd_buffer->gfx9_eop_bug_va);
			break;
		}
		query_va += pool->stride;
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}