/*
 * Copyright 2016 Red Hat Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"

#define TIMESTAMP_NOT_READY UINT64_MAX

static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
static unsigned
radv_get_pipeline_statistics_index(const VkQueryPipelineStatisticFlagBits flag)
{
    int offset = ffs(flag) - 1;
    assert(offset < ARRAY_SIZE(pipeline_statistics_indices));
    return pipeline_statistics_indices[offset];
}
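
/*
 * Illustrative note (not part of the original sources): the lookup above
 * remaps a single VkQueryPipelineStatisticFlagBits bit to the slot the
 * hardware writes inside a pipeline statistics block, e.g.
 *
 *     radv_get_pipeline_statistics_index(
 *         VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT); // bit 0 -> slot 7
 *
 * so results can be returned in Vulkan bit order even though the hardware
 * stores them in its own order.
 */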
static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
    return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
}
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
    nir_ssa_def *counter = nir_load_var(b, var);

    nir_if *if_stmt = nir_if_create(b->shader);
    if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
    nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

    b->cursor = nir_after_cf_list(&if_stmt->then_list);

    nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
    nir_builder_instr_insert(b, &instr->instr);

    b->cursor = nir_after_cf_node(&if_stmt->cf_node);
    counter = nir_iadd(b, counter, nir_imm_int(b, 1));
    nir_store_var(b, var, counter, 0x1);
}
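
/*
 * Roughly speaking, the control flow emitted by the helper above corresponds
 * to the body of a loop doing:
 *
 *     if (var >= count)
 *         break;
 *     var = var + 1;
 *
 * Callers create the surrounding nir_loop themselves and use this helper to
 * bound the iteration count.
 */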
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
    nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
    nir_intrinsic_set_base(flags, 0);
    nir_intrinsic_set_range(flags, 16);
    flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
    flags->num_components = 1;
    nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
    nir_builder_instr_insert(b, &flags->instr);
    return &flags->dest.ssa;
}
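
/*
 * Summary of the 16-byte push constant block the query shaders below read
 * via radv_load_push_int(), matching the push_constants struct filled in
 * radv_query_shader():
 *
 *     offset 0:  uint32_t flags         VkQueryResultFlags
 *     offset 4:  uint32_t dst_stride    output stride in bytes
 *     offset 8:  uint32_t stats_mask    pipeline statistics queries only
 *     offset 12: uint32_t avail_offset  pipeline statistics queries only
 */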
static void
radv_store_availability(nir_builder *b, nir_ssa_def *flags, nir_ssa_def *dst_buf,
                        nir_ssa_def *offset, nir_ssa_def *value32)
{
    nir_ssa_def *result_is_64bit = nir_test_flag(b, flags, VK_QUERY_RESULT_64_BIT);
    nir_if *availability_if = nir_if_create(b->shader);
    availability_if->condition = nir_src_for_ssa(nir_test_flag(b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
    nir_cf_node_insert(b->cursor, &availability_if->cf_node);

    b->cursor = nir_after_cf_list(&availability_if->then_list);

    nir_if *store_64bit_if = nir_if_create(b->shader);
    store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
    nir_cf_node_insert(b->cursor, &store_64bit_if->cf_node);

    b->cursor = nir_after_cf_list(&store_64bit_if->then_list);

    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_vec2(b, value32, nir_imm_int(b, 0)));
    store->src[1] = nir_src_for_ssa(dst_buf);
    store->src[2] = nir_src_for_ssa(offset);
    nir_intrinsic_set_write_mask(store, 0x3);
    nir_intrinsic_set_align(store, 8, 0);
    store->num_components = 2;
    nir_builder_instr_insert(b, &store->instr);

    b->cursor = nir_after_cf_list(&store_64bit_if->else_list);

    store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(value32);
    store->src[1] = nir_src_for_ssa(dst_buf);
    store->src[2] = nir_src_for_ssa(offset);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 4, 0);
    store->num_components = 1;
    nir_builder_instr_insert(b, &store->instr);

    b->cursor = nir_after_cf_node(&store_64bit_if->cf_node);

    b->cursor = nir_after_cf_node(&availability_if->cf_node);
}
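
/*
 * In shader terms, the helper above emits roughly:
 *
 *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
 *         if (flags & VK_QUERY_RESULT_64_BIT)
 *             dst_buf[offset] = (uint64_t)value32;
 *         else
 *             dst_buf[offset] = value32;
 *     }
 */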
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
    /* the shader this builds is roughly
     *
     * push constants {
     *     uint32_t flags;
     *     uint32_t dst_stride;
     * };
     *
     * uint32_t src_stride = 16 * db_count;
     *
     * location(binding = 0) buffer dst_buf;
     * location(binding = 1) buffer src_buf;
     *
     * void main() {
     *     uint64_t result = 0;
     *     uint64_t src_offset = src_stride * global_id.x;
     *     uint64_t dst_offset = dst_stride * global_id.x;
     *     bool available = true;
     *     for (int i = 0; i < db_count; ++i) {
     *         if (enabled_rb_mask & (1 << i)) {
     *             uint64_t start = src_buf[src_offset + 16 * i];
     *             uint64_t end = src_buf[src_offset + 16 * i + 8];
     *             if ((start & (1ull << 63)) && (end & (1ull << 63)))
     *                 result += end - start;
     *             else
     *                 available = false;
     *         }
     *     }
     *     uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
     *     if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
     *         if (flags & VK_QUERY_RESULT_64_BIT)
     *             dst_buf[dst_offset] = result;
     *         else
     *             dst_buf[dst_offset] = (uint32_t)result;
     *     }
     *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
     *         dst_buf[dst_offset + elem_size] = available;
     *     }
     * }
     */
    nir_builder b;
    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
    b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
    b.shader->info.cs.local_size[0] = 64;
    b.shader->info.cs.local_size[1] = 1;
    b.shader->info.cs.local_size[2] = 1;

    nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
    nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
    nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
    nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
    nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
    unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
    unsigned db_count = device->physical_device->rad_info.num_render_backends;

    nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

    nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    dst_buf->num_components = 1;
    nir_intrinsic_set_desc_set(dst_buf, 0);
    nir_intrinsic_set_binding(dst_buf, 0);
    nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &dst_buf->instr);

    nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    src_buf->num_components = 1;
    nir_intrinsic_set_desc_set(src_buf, 0);
    nir_intrinsic_set_binding(src_buf, 1);
    nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &src_buf->instr);

    nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
    nir_ssa_def *wg_id = nir_load_work_group_id(&b);
    nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                            b.shader->info.cs.local_size[0],
                                            b.shader->info.cs.local_size[1],
                                            b.shader->info.cs.local_size[2], 0);
    nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
    global_id = nir_channel(&b, global_id, 0); // We only care about x here.

    nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
    nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
    nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
    nir_store_var(&b, available, nir_imm_true(&b), 0x1);

    nir_loop *outer_loop = nir_loop_create(b.shader);
    nir_builder_cf_insert(&b, &outer_loop->cf_node);
    b.cursor = nir_after_cf_list(&outer_loop->body);

    nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
    radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

    nir_ssa_def *enabled_cond =
        nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
                 nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));

    nir_if *enabled_if = nir_if_create(b.shader);
    enabled_if->condition = nir_src_for_ssa(nir_i2b(&b, enabled_cond));
    nir_cf_node_insert(b.cursor, &enabled_if->cf_node);

    b.cursor = nir_after_cf_list(&enabled_if->then_list);

    nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
    load_offset = nir_iadd(&b, input_base, load_offset);

    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
    load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
    load->src[1] = nir_src_for_ssa(load_offset);
    nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
    load->num_components = 2;
    nir_intrinsic_set_align(load, 16, 0);
    nir_builder_instr_insert(&b, &load->instr);

    nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
    nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);

    nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
    nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

    nir_if *update_if = nir_if_create(b.shader);
    update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
    nir_cf_node_insert(b.cursor, &update_if->cf_node);

    b.cursor = nir_after_cf_list(&update_if->then_list);

    nir_store_var(&b, result,
                  nir_iadd(&b, nir_load_var(&b, result),
                           nir_isub(&b, nir_load_var(&b, end),
                                    nir_load_var(&b, start))), 0x1);

    b.cursor = nir_after_cf_list(&update_if->else_list);

    nir_store_var(&b, available, nir_imm_false(&b), 0x1);

    b.cursor = nir_after_cf_node(&outer_loop->cf_node);

    /* Store the result if complete or if partial results have been requested. */

    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
    nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

    nir_if *store_if = nir_if_create(b.shader);
    store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
    nir_cf_node_insert(b.cursor, &store_if->cf_node);

    b.cursor = nir_after_cf_list(&store_if->then_list);

    nir_if *store_64bit_if = nir_if_create(b.shader);
    store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
    nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

    b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 8, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

    store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 4, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_node(&store_if->cf_node);

    radv_store_availability(&b, flags, &dst_buf->dest.ssa,
                            nir_iadd(&b, result_size, output_base),
                            nir_b2i32(&b, nir_load_var(&b, available)));

    return b.shader;
}
static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
    /* the shader this builds is roughly
     *
     * push constants {
     *     uint32_t flags;
     *     uint32_t dst_stride;
     *     uint32_t stats_mask;
     *     uint32_t avail_offset;
     * };
     *
     * uint32_t src_stride = pipelinestat_block_size * 2;
     *
     * location(binding = 0) buffer dst_buf;
     * location(binding = 1) buffer src_buf;
     *
     * void main() {
     *     uint64_t src_offset = src_stride * global_id.x;
     *     uint64_t dst_base = dst_stride * global_id.x;
     *     uint64_t dst_offset = dst_base;
     *     uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
     *     uint32_t elem_count = stats_mask >> 16;
     *     uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
     *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
     *         dst_buf[dst_offset + elem_count * elem_size] = available32;
     *     }
     *     if ((bool)available32) {
     *         // repeat 11 times:
     *         if (stats_mask & (1 << 0)) {
     *             uint64_t start = src_buf[src_offset + 8 * indices[0]];
     *             uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
     *             uint64_t result = end - start;
     *             if (flags & VK_QUERY_RESULT_64_BIT)
     *                 dst_buf[dst_offset] = result;
     *             else
     *                 dst_buf[dst_offset] = (uint32_t)result;
     *             dst_offset += elem_size;
     *         }
     *     } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
     *         // Set everything to 0 as we don't know what is valid.
     *         for (int i = 0; i < elem_count; ++i)
     *             dst_buf[dst_base + elem_size * i] = 0;
     *     }
     * }
     */
    nir_builder b;
    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
    b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
    b.shader->info.cs.local_size[0] = 64;
    b.shader->info.cs.local_size[1] = 1;
    b.shader->info.cs.local_size[2] = 1;

    nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

    nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
    nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
    nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

    nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    dst_buf->num_components = 1;
    nir_intrinsic_set_desc_set(dst_buf, 0);
    nir_intrinsic_set_binding(dst_buf, 0);
    nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &dst_buf->instr);

    nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    src_buf->num_components = 1;
    nir_intrinsic_set_desc_set(src_buf, 0);
    nir_intrinsic_set_binding(src_buf, 1);
    nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &src_buf->instr);

    nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
    nir_ssa_def *wg_id = nir_load_work_group_id(&b);
    nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                            b.shader->info.cs.local_size[0],
                                            b.shader->info.cs.local_size[1],
                                            b.shader->info.cs.local_size[2], 0);
    nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
    global_id = nir_channel(&b, global_id, 0); // We only care about x here.

    nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
    nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

    avail_offset = nir_iadd(&b, avail_offset,
                            nir_imul(&b, global_id, nir_imm_int(&b, 4)));

    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
    load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
    load->src[1] = nir_src_for_ssa(avail_offset);
    nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
    load->num_components = 1;
    nir_intrinsic_set_align(load, 4, 0);
    nir_builder_instr_insert(&b, &load->instr);
    nir_ssa_def *available32 = &load->dest.ssa;

    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
    nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
    nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

    radv_store_availability(&b, flags, &dst_buf->dest.ssa,
                            nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)),
                            available32);

    nir_if *available_if = nir_if_create(b.shader);
    available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
    nir_cf_node_insert(b.cursor, &available_if->cf_node);

    b.cursor = nir_after_cf_list(&available_if->then_list);

    nir_store_var(&b, output_offset, output_base, 0x1);
    for (int i = 0; i < 11; ++i) {
        nir_if *store_if = nir_if_create(b.shader);
        store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
        nir_cf_node_insert(b.cursor, &store_if->cf_node);

        b.cursor = nir_after_cf_list(&store_if->then_list);

        load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
        load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
        load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
                                                nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
        nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
        load->num_components = 1;
        nir_intrinsic_set_align(load, 8, 0);
        nir_builder_instr_insert(&b, &load->instr);
        nir_ssa_def *start = &load->dest.ssa;

        load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
        load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
        load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
                                                nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
        nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
        load->num_components = 1;
        nir_intrinsic_set_align(load, 8, 0);
        nir_builder_instr_insert(&b, &load->instr);
        nir_ssa_def *end = &load->dest.ssa;

        nir_ssa_def *result = nir_isub(&b, end, start);

        nir_if *store_64bit_if = nir_if_create(b.shader);
        store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
        nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

        b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

        nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
        store->src[0] = nir_src_for_ssa(result);
        store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
        store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
        nir_intrinsic_set_write_mask(store, 0x1);
        nir_intrinsic_set_align(store, 8, 0);
        store->num_components = 1;
        nir_builder_instr_insert(&b, &store->instr);

        b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

        store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
        store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
        store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
        store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
        nir_intrinsic_set_write_mask(store, 0x1);
        nir_intrinsic_set_align(store, 4, 0);
        store->num_components = 1;
        nir_builder_instr_insert(&b, &store->instr);

        b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

        nir_store_var(&b, output_offset,
                      nir_iadd(&b, nir_load_var(&b, output_offset),
                               elem_size), 0x1);

        b.cursor = nir_after_cf_node(&store_if->cf_node);
    }

    b.cursor = nir_after_cf_list(&available_if->else_list);

    available_if = nir_if_create(b.shader);
    available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
    nir_cf_node_insert(b.cursor, &available_if->cf_node);

    b.cursor = nir_after_cf_list(&available_if->then_list);

    /* Stores zeros in all outputs. */

    nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
    nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

    nir_loop *loop = nir_loop_create(b.shader);
    nir_builder_cf_insert(&b, &loop->cf_node);
    b.cursor = nir_after_cf_list(&loop->body);

    nir_ssa_def *current_counter = nir_load_var(&b, counter);
    radv_break_on_count(&b, counter, elem_count);

    nir_ssa_def *output_elem = nir_iadd(&b, output_base,
                                        nir_imul(&b, elem_size, current_counter));

    nir_if *store_64bit_if = nir_if_create(b.shader);
    store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
    nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

    b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_elem);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 8, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

    store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_elem);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 4, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_node(&loop->cf_node);

    return b.shader;
}
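
/*
 * Example of the stats_mask encoding consumed above (radv_query_shader()
 * builds it before dispatching): for a pool created with
 * VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
 * VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT the low 11 bits hold
 * the mask (0x021) and the upper 16 bits hold its popcount, so
 * stats_mask = (2 << 16) | 0x021 and elem_count = stats_mask >> 16 = 2.
 */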
static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
    /* the shader this builds is roughly
     *
     * uint32_t src_stride = 32;
     *
     * location(binding = 0) buffer dst_buf;
     * location(binding = 1) buffer src_buf;
     *
     * void main() {
     *     uint64_t result[2] = {};
     *     bool available = false;
     *     uint64_t src_offset = src_stride * global_id.x;
     *     uint64_t dst_offset = dst_stride * global_id.x;
     *     uint64_t *src_data = src_buf[src_offset];
     *     uint32_t avail = (src_data[0] >> 32) &
     *                      (src_data[1] >> 32) &
     *                      (src_data[2] >> 32) &
     *                      (src_data[3] >> 32);
     *     if (avail & 0x80000000) {
     *         result[0] = src_data[3] - src_data[1];
     *         result[1] = src_data[2] - src_data[0];
     *         available = true;
     *     }
     *     uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
     *     if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
     *         if (flags & VK_QUERY_RESULT_64_BIT) {
     *             dst_buf[dst_offset] = result;
     *         } else {
     *             dst_buf[dst_offset] = (uint32_t)result;
     *         }
     *     }
     *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
     *         dst_buf[dst_offset + result_size] = available;
     *     }
     * }
     */
    nir_builder b;
    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
    b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
    b.shader->info.cs.local_size[0] = 64;
    b.shader->info.cs.local_size[1] = 1;
    b.shader->info.cs.local_size[2] = 1;

    /* Create and initialize local variables. */
    nir_variable *result =
        nir_local_variable_create(b.impl,
                                  glsl_vector_type(GLSL_TYPE_UINT64, 2),
                                  "result");
    nir_variable *available =
        nir_local_variable_create(b.impl, glsl_bool_type(), "available");

    nir_store_var(&b, result,
                  nir_vec2(&b, nir_imm_int64(&b, 0),
                           nir_imm_int64(&b, 0)), 0x3);
    nir_store_var(&b, available, nir_imm_false(&b), 0x1);

    nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

    /* Load resources. */
    nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    dst_buf->num_components = 1;
    nir_intrinsic_set_desc_set(dst_buf, 0);
    nir_intrinsic_set_binding(dst_buf, 0);
    nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &dst_buf->instr);

    nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    src_buf->num_components = 1;
    nir_intrinsic_set_desc_set(src_buf, 0);
    nir_intrinsic_set_binding(src_buf, 1);
    nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &src_buf->instr);

    /* Compute global ID. */
    nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
    nir_ssa_def *wg_id = nir_load_work_group_id(&b);
    nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                            b.shader->info.cs.local_size[0],
                                            b.shader->info.cs.local_size[1],
                                            b.shader->info.cs.local_size[2], 0);
    nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
    global_id = nir_channel(&b, global_id, 0); // We only care about x here.

    /* Compute src/dst strides. */
    nir_ssa_def *input_stride = nir_imm_int(&b, 32);
    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
    nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

    /* Load data from the query pool. */
    nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
    load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
    load1->src[1] = nir_src_for_ssa(input_base);
    nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
    load1->num_components = 4;
    nir_intrinsic_set_align(load1, 32, 0);
    nir_builder_instr_insert(&b, &load1->instr);

    nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
    load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
    load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
    nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
    load2->num_components = 4;
    nir_intrinsic_set_align(load2, 16, 0);
    nir_builder_instr_insert(&b, &load2->instr);

    /* Check if result is available. */
    nir_ssa_def *avails[2];
    avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
                         nir_channel(&b, &load1->dest.ssa, 3));
    avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
                         nir_channel(&b, &load2->dest.ssa, 3));
    nir_ssa_def *result_is_available =
        nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
                             nir_imm_int(&b, 0x80000000)));

    /* Only compute result if available. */
    nir_if *available_if = nir_if_create(b.shader);
    available_if->condition = nir_src_for_ssa(result_is_available);
    nir_cf_node_insert(b.cursor, &available_if->cf_node);

    b.cursor = nir_after_cf_list(&available_if->then_list);

    nir_ssa_def *packed64[4];
    packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
                                                nir_channel(&b, &load1->dest.ssa, 0),
                                                nir_channel(&b, &load1->dest.ssa, 1)));
    packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
                                                nir_channel(&b, &load1->dest.ssa, 2),
                                                nir_channel(&b, &load1->dest.ssa, 3)));
    packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
                                                nir_channel(&b, &load2->dest.ssa, 0),
                                                nir_channel(&b, &load2->dest.ssa, 1)));
    packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
                                                nir_channel(&b, &load2->dest.ssa, 2),
                                                nir_channel(&b, &load2->dest.ssa, 3)));

    /* Compute result. */
    nir_ssa_def *num_primitive_written =
        nir_isub(&b, packed64[3], packed64[1]);
    nir_ssa_def *primitive_storage_needed =
        nir_isub(&b, packed64[2], packed64[0]);

    nir_store_var(&b, result,
                  nir_vec2(&b, num_primitive_written,
                           primitive_storage_needed), 0x3);
    nir_store_var(&b, available, nir_imm_true(&b), 0x1);

    b.cursor = nir_after_cf_node(&available_if->cf_node);

    /* Determine if result is 64 or 32 bit. */
    nir_ssa_def *result_is_64bit =
        nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
    nir_ssa_def *result_size =
        nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
                  nir_imm_int(&b, 8));

    /* Store the result if complete or partial results have been requested. */
    nir_if *store_if = nir_if_create(b.shader);
    store_if->condition =
        nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                                nir_load_var(&b, available)));
    nir_cf_node_insert(b.cursor, &store_if->cf_node);

    b.cursor = nir_after_cf_list(&store_if->then_list);

    nir_if *store_64bit_if = nir_if_create(b.shader);
    store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
    nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

    b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x3);
    nir_intrinsic_set_align(store, 8, 0);
    store->num_components = 2;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

    store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x3);
    nir_intrinsic_set_align(store, 4, 0);
    store->num_components = 2;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

    b.cursor = nir_after_cf_node(&store_if->cf_node);

    radv_store_availability(&b, flags, &dst_buf->dest.ssa,
                            nir_iadd(&b, result_size, output_base),
                            nir_b2i32(&b, nir_load_var(&b, available)));

    return b.shader;
}
static nir_shader *
build_timestamp_query_shader(struct radv_device *device)
{
    /* the shader this builds is roughly
     *
     * uint32_t src_stride = 8;
     *
     * location(binding = 0) buffer dst_buf;
     * location(binding = 1) buffer src_buf;
     *
     * void main() {
     *     uint64_t result = 0;
     *     bool available = false;
     *     uint64_t src_offset = src_stride * global_id.x;
     *     uint64_t dst_offset = dst_stride * global_id.x;
     *     uint64_t timestamp = src_buf[src_offset];
     *     if (timestamp != TIMESTAMP_NOT_READY) {
     *         result = timestamp;
     *         available = true;
     *     }
     *     uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
     *     if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
     *         if (flags & VK_QUERY_RESULT_64_BIT) {
     *             dst_buf[dst_offset] = result;
     *         } else {
     *             dst_buf[dst_offset] = (uint32_t)result;
     *         }
     *     }
     *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
     *         dst_buf[dst_offset + result_size] = available;
     *     }
     * }
     */
    nir_builder b;
    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
    b.shader->info.name = ralloc_strdup(b.shader, "timestamp_query");
    b.shader->info.cs.local_size[0] = 64;
    b.shader->info.cs.local_size[1] = 1;
    b.shader->info.cs.local_size[2] = 1;

    /* Create and initialize local variables. */
    nir_variable *result =
        nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
    nir_variable *available =
        nir_local_variable_create(b.impl, glsl_bool_type(), "available");

    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
    nir_store_var(&b, available, nir_imm_false(&b), 0x1);

    nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

    /* Load resources. */
    nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    dst_buf->num_components = 1;
    nir_intrinsic_set_desc_set(dst_buf, 0);
    nir_intrinsic_set_binding(dst_buf, 0);
    nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &dst_buf->instr);

    nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                              nir_intrinsic_vulkan_resource_index);
    src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
    src_buf->num_components = 1;
    nir_intrinsic_set_desc_set(src_buf, 0);
    nir_intrinsic_set_binding(src_buf, 1);
    nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
    nir_builder_instr_insert(&b, &src_buf->instr);

    /* Compute global ID. */
    nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
    nir_ssa_def *wg_id = nir_load_work_group_id(&b);
    nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                            b.shader->info.cs.local_size[0],
                                            b.shader->info.cs.local_size[1],
                                            b.shader->info.cs.local_size[2], 0);
    nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
    global_id = nir_channel(&b, global_id, 0); // We only care about x here.

    /* Compute src/dst strides. */
    nir_ssa_def *input_stride = nir_imm_int(&b, 8);
    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
    nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

    /* Load data from the query pool. */
    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
    load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
    load->src[1] = nir_src_for_ssa(input_base);
    nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
    load->num_components = 2;
    nir_intrinsic_set_align(load, 8, 0);
    nir_builder_instr_insert(&b, &load->instr);

    /* Pack the timestamp. */
    nir_ssa_def *timestamp;
    timestamp = nir_pack_64_2x32(&b, nir_vec2(&b,
                                              nir_channel(&b, &load->dest.ssa, 0),
                                              nir_channel(&b, &load->dest.ssa, 1)));

    /* Check if result is available. */
    nir_ssa_def *result_is_available =
        nir_i2b(&b, nir_ine(&b, timestamp,
                            nir_imm_int64(&b, TIMESTAMP_NOT_READY)));

    /* Only store result if available. */
    nir_if *available_if = nir_if_create(b.shader);
    available_if->condition = nir_src_for_ssa(result_is_available);
    nir_cf_node_insert(b.cursor, &available_if->cf_node);

    b.cursor = nir_after_cf_list(&available_if->then_list);

    nir_store_var(&b, result, timestamp, 0x1);
    nir_store_var(&b, available, nir_imm_true(&b), 0x1);

    b.cursor = nir_after_cf_node(&available_if->cf_node);

    /* Determine if result is 64 or 32 bit. */
    nir_ssa_def *result_is_64bit =
        nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
    nir_ssa_def *result_size =
        nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8),
                  nir_imm_int(&b, 4));

    /* Store the result if complete or partial results have been requested. */
    nir_if *store_if = nir_if_create(b.shader);
    store_if->condition =
        nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                                nir_load_var(&b, available)));
    nir_cf_node_insert(b.cursor, &store_if->cf_node);

    b.cursor = nir_after_cf_list(&store_if->then_list);

    nir_if *store_64bit_if = nir_if_create(b.shader);
    store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
    nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

    b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

    nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 8, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

    store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
    store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
    store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
    store->src[2] = nir_src_for_ssa(output_base);
    nir_intrinsic_set_write_mask(store, 0x1);
    nir_intrinsic_set_align(store, 4, 0);
    store->num_components = 1;
    nir_builder_instr_insert(&b, &store->instr);

    b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

    b.cursor = nir_after_cf_node(&store_if->cf_node);

    radv_store_availability(&b, flags, &dst_buf->dest.ssa,
                            nir_iadd(&b, result_size, output_base),
                            nir_b2i32(&b, nir_load_var(&b, available)));

    return b.shader;
}
static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
{
    VkResult result;
    struct radv_shader_module occlusion_cs = { .nir = NULL };
    struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
    struct radv_shader_module tfb_cs = { .nir = NULL };
    struct radv_shader_module timestamp_cs = { .nir = NULL };

    mtx_lock(&device->meta_state.mtx);
    if (device->meta_state.query.pipeline_statistics_query_pipeline) {
        mtx_unlock(&device->meta_state.mtx);
        return VK_SUCCESS;
    }
    occlusion_cs.nir = build_occlusion_query_shader(device);
    pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
    tfb_cs.nir = build_tfb_query_shader(device);
    timestamp_cs.nir = build_timestamp_query_shader(device);

    VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
        .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
        .bindingCount = 2,
        .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
                .binding = 0,
                .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                .descriptorCount = 1,
                .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
                .pImmutableSamplers = NULL
            },
            {
                .binding = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                .descriptorCount = 1,
                .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
                .pImmutableSamplers = NULL
            },
        }
    };

    result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
                                            &occlusion_ds_create_info,
                                            &device->meta_state.alloc,
                                            &device->meta_state.query.ds_layout);
    if (result != VK_SUCCESS)
        goto fail;

    VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        .setLayoutCount = 1,
        .pSetLayouts = &device->meta_state.query.ds_layout,
        .pushConstantRangeCount = 1,
        .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
    };

    result = radv_CreatePipelineLayout(radv_device_to_handle(device),
                                       &occlusion_pl_create_info,
                                       &device->meta_state.alloc,
                                       &device->meta_state.query.p_layout);
    if (result != VK_SUCCESS)
        goto fail;

    VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
        .module = radv_shader_module_to_handle(&occlusion_cs),
        .pName = "main",
        .pSpecializationInfo = NULL,
    };

    VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .stage = occlusion_pipeline_shader_stage,
        .layout = device->meta_state.query.p_layout,
    };

    result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                         radv_pipeline_cache_to_handle(&device->meta_state.cache),
                                         1, &occlusion_vk_pipeline_info, NULL,
                                         &device->meta_state.query.occlusion_query_pipeline);
    if (result != VK_SUCCESS)
        goto fail;

    VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
        .module = radv_shader_module_to_handle(&pipeline_statistics_cs),
        .pName = "main",
        .pSpecializationInfo = NULL,
    };

    VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .stage = pipeline_statistics_pipeline_shader_stage,
        .layout = device->meta_state.query.p_layout,
    };

    result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                         radv_pipeline_cache_to_handle(&device->meta_state.cache),
                                         1, &pipeline_statistics_vk_pipeline_info, NULL,
                                         &device->meta_state.query.pipeline_statistics_query_pipeline);
    if (result != VK_SUCCESS)
        goto fail;

    VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
        .module = radv_shader_module_to_handle(&tfb_cs),
        .pName = "main",
        .pSpecializationInfo = NULL,
    };

    VkComputePipelineCreateInfo tfb_pipeline_info = {
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .stage = tfb_pipeline_shader_stage,
        .layout = device->meta_state.query.p_layout,
    };

    result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                         radv_pipeline_cache_to_handle(&device->meta_state.cache),
                                         1, &tfb_pipeline_info, NULL,
                                         &device->meta_state.query.tfb_query_pipeline);
    if (result != VK_SUCCESS)
        goto fail;

    VkPipelineShaderStageCreateInfo timestamp_pipeline_shader_stage = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
        .module = radv_shader_module_to_handle(&timestamp_cs),
        .pName = "main",
        .pSpecializationInfo = NULL,
    };

    VkComputePipelineCreateInfo timestamp_pipeline_info = {
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .stage = timestamp_pipeline_shader_stage,
        .layout = device->meta_state.query.p_layout,
    };

    result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                         radv_pipeline_cache_to_handle(&device->meta_state.cache),
                                         1, &timestamp_pipeline_info, NULL,
                                         &device->meta_state.query.timestamp_query_pipeline);

fail:
    if (result != VK_SUCCESS)
        radv_device_finish_meta_query_state(device);
    ralloc_free(occlusion_cs.nir);
    ralloc_free(pipeline_statistics_cs.nir);
    ralloc_free(tfb_cs.nir);
    ralloc_free(timestamp_cs.nir);
    mtx_unlock(&device->meta_state.mtx);
    return result;
}
VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
    if (on_demand)
        return VK_SUCCESS;

    return radv_device_init_meta_query_state_internal(device);
}
void radv_device_finish_meta_query_state(struct radv_device *device)
{
    if (device->meta_state.query.tfb_query_pipeline)
        radv_DestroyPipeline(radv_device_to_handle(device),
                             device->meta_state.query.tfb_query_pipeline,
                             &device->meta_state.alloc);

    if (device->meta_state.query.pipeline_statistics_query_pipeline)
        radv_DestroyPipeline(radv_device_to_handle(device),
                             device->meta_state.query.pipeline_statistics_query_pipeline,
                             &device->meta_state.alloc);

    if (device->meta_state.query.occlusion_query_pipeline)
        radv_DestroyPipeline(radv_device_to_handle(device),
                             device->meta_state.query.occlusion_query_pipeline,
                             &device->meta_state.alloc);

    if (device->meta_state.query.timestamp_query_pipeline)
        radv_DestroyPipeline(radv_device_to_handle(device),
                             device->meta_state.query.timestamp_query_pipeline,
                             &device->meta_state.alloc);

    if (device->meta_state.query.p_layout)
        radv_DestroyPipelineLayout(radv_device_to_handle(device),
                                   device->meta_state.query.p_layout,
                                   &device->meta_state.alloc);

    if (device->meta_state.query.ds_layout)
        radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
                                        device->meta_state.query.ds_layout,
                                        &device->meta_state.alloc);
}
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
                              VkPipeline *pipeline,
                              struct radeon_winsys_bo *src_bo,
                              struct radeon_winsys_bo *dst_bo,
                              uint64_t src_offset, uint64_t dst_offset,
                              uint32_t src_stride, uint32_t dst_stride,
                              uint32_t count, uint32_t flags,
                              uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
    struct radv_device *device = cmd_buffer->device;
    struct radv_meta_saved_state saved_state;
    bool old_predicating;

    if (!*pipeline) {
        VkResult ret = radv_device_init_meta_query_state_internal(device);
        if (ret != VK_SUCCESS) {
            cmd_buffer->record_result = ret;
            return;
        }
    }

    radv_meta_save(&saved_state, cmd_buffer,
                   RADV_META_SAVE_COMPUTE_PIPELINE |
                   RADV_META_SAVE_CONSTANTS |
                   RADV_META_SAVE_DESCRIPTORS);

    /* VK_EXT_conditional_rendering says that copy commands should not be
     * affected by conditional rendering.
     */
    old_predicating = cmd_buffer->state.predicating;
    cmd_buffer->state.predicating = false;

    struct radv_buffer dst_buffer = {
        .bo = dst_bo,
        .offset = dst_offset,
        .size = dst_stride * count
    };

    struct radv_buffer src_buffer = {
        .bo = src_bo,
        .offset = src_offset,
        .size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
    };

    radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
                         VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

    radv_meta_push_descriptor_set(cmd_buffer,
                                  VK_PIPELINE_BIND_POINT_COMPUTE,
                                  device->meta_state.query.p_layout,
                                  0, /* set */
                                  2, /* descriptorWriteCount */
                                  (VkWriteDescriptorSet[]) {
                                      {
                                          .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                          .dstBinding = 0,
                                          .dstArrayElement = 0,
                                          .descriptorCount = 1,
                                          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                          .pBufferInfo = &(VkDescriptorBufferInfo) {
                                              .buffer = radv_buffer_to_handle(&dst_buffer),
                                              .offset = 0,
                                              .range = VK_WHOLE_SIZE
                                          }
                                      },
                                      {
                                          .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                          .dstBinding = 1,
                                          .dstArrayElement = 0,
                                          .descriptorCount = 1,
                                          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                          .pBufferInfo = &(VkDescriptorBufferInfo) {
                                              .buffer = radv_buffer_to_handle(&src_buffer),
                                              .offset = 0,
                                              .range = VK_WHOLE_SIZE
                                          }
                                      }
                                  });

    /* Encode the number of elements for easy access by the shader. */
    pipeline_stats_mask &= 0x7ff;
    pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;

    avail_offset -= src_offset;

    struct {
        uint32_t flags;
        uint32_t dst_stride;
        uint32_t pipeline_stats_mask;
        uint32_t avail_offset;
    } push_constants = {
        flags,
        dst_stride,
        pipeline_stats_mask,
        avail_offset
    };

    radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                          device->meta_state.query.p_layout,
                          VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
                          &push_constants);

    cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 |
                                    RADV_CMD_FLAG_INV_VCACHE;

    if (flags & VK_QUERY_RESULT_WAIT_BIT)
        cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

    radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

    /* Restore conditional rendering. */
    cmd_buffer->state.predicating = old_predicating;

    radv_meta_restore(&saved_state, cmd_buffer);
}
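
/*
 * Note (illustrative, not from the original sources): the compute shaders
 * built above are written so that one invocation processes exactly one
 * query. radv_query_shader() therefore dispatches `count` invocations
 * (local size 64 in x) and lets global_id.x index both the source query
 * slot and the destination element.
 */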
static bool
radv_query_pool_needs_gds(struct radv_device *device,
                          struct radv_query_pool *pool)
{
    /* The number of primitives generated by geometry shader invocations is
     * only counted by the hardware if GS uses the legacy path. When NGG GS
     * is used, the hardware can't know the number of generated primitives
     * and we have to do it manually inside the shader. To achieve that, the
     * driver does a plain GDS atomic to accumulate that value.
     * TODO: fix use of NGG GS and non-NGG GS inside the same begin/end
     * query.
     */
    return device->physical_device->use_ngg_gs &&
           (pool->pipeline_stats_mask & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);
}
static void
radv_destroy_query_pool(struct radv_device *device,
                        const VkAllocationCallbacks *pAllocator,
                        struct radv_query_pool *pool)
{
    device->ws->buffer_destroy(pool->bo);
    vk_object_base_finish(&pool->base);
    vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_CreateQueryPool(
    VkDevice _device,
    const VkQueryPoolCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkQueryPool* pQueryPool)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_query_pool *pool = vk_alloc2(&device->vk.alloc, pAllocator,
                                             sizeof(*pool), 8,
                                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

    if (!pool)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    vk_object_base_init(&device->vk, &pool->base,
                        VK_OBJECT_TYPE_QUERY_POOL);

    switch(pCreateInfo->queryType) {
    case VK_QUERY_TYPE_OCCLUSION:
        pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
        break;
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
        pool->stride = pipelinestat_block_size * 2;
        break;
    case VK_QUERY_TYPE_TIMESTAMP:
        pool->stride = 8;
        break;
    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
        pool->stride = 32;
        break;
    default:
        unreachable("creating unhandled query type");
    }

    pool->type = pCreateInfo->queryType;
    pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
    pool->availability_offset = pool->stride * pCreateInfo->queryCount;
    pool->size = pool->availability_offset;
    if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
        pool->size += 4 * pCreateInfo->queryCount;

    pool->bo = device->ws->buffer_create(device->ws, pool->size,
                                         64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                         RADV_BO_PRIORITY_QUERY_POOL);
    if (!pool->bo) {
        radv_destroy_query_pool(device, pAllocator, pool);
        return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }

    pool->ptr = device->ws->buffer_map(pool->bo);
    if (!pool->ptr) {
        radv_destroy_query_pool(device, pAllocator, pool);
        return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }

    *pQueryPool = radv_query_pool_to_handle(pool);
    return VK_SUCCESS;
}
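
/*
 * Worked example of the layout computed above (illustrative): an occlusion
 * pool with queryCount = 2 on a GPU with 4 render backends gets
 * stride = 16 * 4 = 64 bytes per query and availability_offset = size = 128,
 * with no separate availability area; a pipeline statistics pool with
 * queryCount = 2 gets stride = 11 * 8 * 2 = 176, availability_offset = 352
 * and size = 352 + 4 * 2, the trailing dwords holding per-query availability.
 */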
void radv_DestroyQueryPool(
    VkDevice _device,
    VkQueryPool _pool,
    const VkAllocationCallbacks* pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

    if (!pool)
        return;

    radv_destroy_query_pool(device, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
    VkDevice _device,
    VkQueryPool queryPool,
    uint32_t firstQuery,
    uint32_t queryCount,
    size_t dataSize,
    void* pData,
    VkDeviceSize stride,
    VkQueryResultFlags flags)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    char *data = pData;
    VkResult result = VK_SUCCESS;

    if (radv_device_is_lost(device))
        return VK_ERROR_DEVICE_LOST;

    for(unsigned i = 0; i < queryCount; ++i, data += stride) {
        char *dest = data;
        unsigned query = firstQuery + i;
        char *src = pool->ptr + query * pool->stride;
        bool available;

        switch (pool->type) {
        case VK_QUERY_TYPE_TIMESTAMP: {
            volatile uint64_t const *src64 = (volatile uint64_t const *)src;
            available = *src64 != TIMESTAMP_NOT_READY;

            if (flags & VK_QUERY_RESULT_WAIT_BIT) {
                while (*src64 == TIMESTAMP_NOT_READY)
                    ;
                available = true;
            }

            if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                result = VK_NOT_READY;

            if (flags & VK_QUERY_RESULT_64_BIT) {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint64_t*)dest = *src64;
                dest += 8;
            } else {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint32_t*)dest = *(volatile uint32_t*)src;
                dest += 4;
            }
            break;
        }
        case VK_QUERY_TYPE_OCCLUSION: {
            volatile uint64_t const *src64 = (volatile uint64_t const *)src;
            uint32_t db_count = device->physical_device->rad_info.num_render_backends;
            uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
            uint64_t sample_count = 0;
            available = true;

            for (int i = 0; i < db_count; ++i) {
                uint64_t start, end;

                if (!(enabled_rb_mask & (1 << i)))
                    continue;

                do {
                    start = src64[2 * i];
                    end = src64[2 * i + 1];
                } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

                if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
                    available = false;
                else
                    sample_count += end - start;
            }

            if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                result = VK_NOT_READY;

            if (flags & VK_QUERY_RESULT_64_BIT) {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint64_t*)dest = sample_count;
                dest += 8;
            } else {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint32_t*)dest = sample_count;
                dest += 4;
            }
            break;
        }
        case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
            if (flags & VK_QUERY_RESULT_WAIT_BIT)
                while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
                    ;

            available = *(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);

            if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                result = VK_NOT_READY;

            const volatile uint64_t *start = (uint64_t*)src;
            const volatile uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
            if (flags & VK_QUERY_RESULT_64_BIT) {
                uint64_t *dst = (uint64_t*)dest;
                dest += util_bitcount(pool->pipeline_stats_mask) * 8;
                for(int i = 0; i < 11; ++i) {
                    if(pool->pipeline_stats_mask & (1u << i)) {
                        if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                            *dst = stop[pipeline_statistics_indices[i]] -
                                   start[pipeline_statistics_indices[i]];
                        dst++;
                    }
                }
            } else {
                uint32_t *dst = (uint32_t*)dest;
                dest += util_bitcount(pool->pipeline_stats_mask) * 4;
                for(int i = 0; i < 11; ++i) {
                    if(pool->pipeline_stats_mask & (1u << i)) {
                        if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                            *dst = stop[pipeline_statistics_indices[i]] -
                                   start[pipeline_statistics_indices[i]];
                        dst++;
                    }
                }
            }
            break;
        }
        case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
            volatile uint64_t const *src64 = (volatile uint64_t const *)src;
            uint64_t num_primitives_written;
            uint64_t primitive_storage_needed;

            /* SAMPLE_STREAMOUTSTATS stores this structure:
             * {
             *     u64 NumPrimitivesWritten;
             *     u64 PrimitiveStorageNeeded;
             * }
             */
            available = true;
            for (int j = 0; j < 4; j++) {
                if (!(src64[j] & 0x8000000000000000UL))
                    available = false;
            }

            if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                result = VK_NOT_READY;

            num_primitives_written = src64[3] - src64[1];
            primitive_storage_needed = src64[2] - src64[0];

            if (flags & VK_QUERY_RESULT_64_BIT) {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint64_t *)dest = num_primitives_written;
                dest += 8;
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint64_t *)dest = primitive_storage_needed;
                dest += 8;
            } else {
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint32_t *)dest = num_primitives_written;
                dest += 4;
                if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                    *(uint32_t *)dest = primitive_storage_needed;
                dest += 4;
            }
            break;
        }
        default:
            unreachable("trying to get results of unhandled query type");
        }

        if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
            if (flags & VK_QUERY_RESULT_64_BIT) {
                *(uint64_t*)dest = available;
            } else {
                *(uint32_t*)dest = available;
            }
        }
    }

    return result;
}
static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
                             struct radv_query_pool *pool)
{
    if (cmd_buffer->pending_reset_query) {
        if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
            /* Only need to flush caches if the query pool size is
             * large enough to be reset using the compute shader
             * path. Small pools don't need any cache flushes
             * because we use a CP dma clear.
             */
            si_emit_cache_flush(cmd_buffer);
        }
    }
}
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

	/* From the Vulkan spec 1.1.108:
	 *
	 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
	 *  previous uses of vkCmdResetQueryPool in the same queue, without any
	 *  additional synchronization."
	 *
	 * So, we have to flush the caches if the compute shader path was used.
	 */
	emit_query_flush(cmd_buffer, pool);
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				/* Waits on the upper word of the last DB entry */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
						 src_va, 0x80000000, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
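
		/* Note on the wait above: ZPASS_DONE writes a {begin, end}
		 * pair of 64-bit counters per DB and the GPU sets the high
		 * bit of each value once it lands, so waiting for 0x80000000
		 * in the upper dword of the last entry covers the whole
		 * slot. */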
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
						 avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
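
		/* Note on the wait above: the availability dword is written
		 * with the value 1 by the bottom-of-pipe EOP event emitted in
		 * emit_end_query(), so WAIT_REG_MEM_EQUAL against 1 is
		 * exact. */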
	case VK_QUERY_TYPE_TIMESTAMP:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t local_src_va = va + query * pool->stride;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				/* Wait on the high 32 bits of the timestamp in
				 * case the low part is 0xffffffff.
				 */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
						 local_src_va + 4,
						 TIMESTAMP_NOT_READY >> 32,
						 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.timestamp_query_pipeline,
				  pool->bo, dst_buffer->bo,
				  firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
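
		/* Reset fills timestamp slots with TIMESTAMP_NOT_READY, so a
		 * high dword that is no longer 0xffffffff means the GPU has
		 * written the real timestamp. */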
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; i++) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride;

				radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

				/* Wait on the upper word of all results. */
				for (unsigned j = 0; j < 4; j++, src_va += 8) {
					radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
							 src_va + 4, 0x80000000,
							 0xffffffff);
				}
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
				  pool->bo, dst_buffer->bo,
				  firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}
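
/* Illustrative sketch (not part of the driver): a typical application-side use
 * of the copy path above, assuming 'buf' is a VkBuffer with room for 'count'
 * tightly packed 64-bit results:
 *
 *	vkCmdCopyQueryPoolResults(cmd, pool, first, count, buf, 0,
 *				  sizeof(uint64_t),
 *				  VK_QUERY_RESULT_64_BIT |
 *				  VK_QUERY_RESULT_WAIT_BIT);
 *
 * With VK_QUERY_RESULT_WAIT_BIT set, the CP WAIT_REG_MEM packets emitted above
 * stall the copy until every requested query is available. */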
void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? (uint32_t)TIMESTAMP_NOT_READY : 0;
	uint32_t flush_bits = 0;

	/* Make sure to sync all previous work if the given command buffer has
	 * pending active queries. Otherwise the GPU might write query data
	 * after the reset operation.
	 */
	cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
				       firstQuery * pool->stride,
				       queryCount * pool->stride, value);

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
					       pool->availability_offset + firstQuery * 4,
					       queryCount * 4, 0);
	}

	if (flush_bits) {
		/* Only need to flush caches for the compute shader path. */
		cmd_buffer->pending_reset_query = true;
		cmd_buffer->state.flush_bits |= flush_bits;
	}
}
void radv_ResetQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);

	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? (uint32_t)TIMESTAMP_NOT_READY : 0;
	uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
	uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);

	for(uint32_t *p = data; p != data_end; ++p)
		*p = value;

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		memset(pool->ptr + pool->availability_offset + firstQuery * 4,
		       0, queryCount * 4);
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_query_pool *pool,
			     uint64_t va,
			     VkQueryType query_type,
			     VkQueryControlFlags flags,
			     uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1) {
			if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
				/* This is the first occlusion query, enable
				 * the hint if the precision bit is set.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;
			}

			radv_set_db_count_control(cmd_buffer);
		} else {
			if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
			    !cmd_buffer->state.perfect_occlusion_queries_enabled) {
				/* This is not the first query, but this one
				 * needs to enable precision, DB_COUNT_CONTROL
				 * has to be updated accordingly.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;

				radv_set_db_count_control(cmd_buffer);
			}
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		++cmd_buffer->state.active_pipeline_queries;
		if (cmd_buffer->state.active_pipeline_queries == 1) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
			int idx = radv_get_pipeline_statistics_index(VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

			/* Make sure GDS is idle before copying the value. */
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
							RADV_CMD_FLAG_INV_L2;
			si_emit_cache_flush(cmd_buffer);

			va += 8 * idx;
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   radv_cmd_buffer_uses_mec(cmd_buffer),
						   V_028A90_PS_DONE, 0,
						   EOP_DST_SEL_TC_L2,
						   EOP_DATA_SEL_GDS,
						   va, EOP_DATA_GDS(0, 1), 0);

			/* Record that the command buffer needs GDS. */
			cmd_buffer->gds_needed = true;

			cmd_buffer->state.active_pipeline_gds_queries++;
		}
		break;
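
		/* Background note (editor's summary): with NGG the legacy
		 * pipeline-stat event cannot count GS primitives, so the
		 * shader accumulates them in GDS and the PS_DONE event above
		 * copies that GDS word into the statistic slot selected by
		 * 'idx'. */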
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_query_pool *pool,
			   uint64_t va, uint64_t avail_va,
			   VkQueryType query_type, uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0) {
			radv_set_db_count_control(cmd_buffer);

			/* Reset the perfect occlusion queries hint now that no
			 * queries are active.
			 */
			cmd_buffer->state.perfect_occlusion_queries_enabled = false;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		cmd_buffer->state.active_pipeline_queries--;
		if (cmd_buffer->state.active_pipeline_queries == 0) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
		}
		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DST_SEL_MEM,
					   EOP_DATA_SEL_VALUE_32BIT,
					   avail_va, 1,
					   cmd_buffer->gfx9_eop_bug_va);

		if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
			int idx = radv_get_pipeline_statistics_index(VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

			/* Make sure GDS is idle before copying the value. */
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
							RADV_CMD_FLAG_INV_L2;
			si_emit_cache_flush(cmd_buffer);

			va += 8 * idx;
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   radv_cmd_buffer_uses_mec(cmd_buffer),
						   V_028A90_PS_DONE, 0,
						   EOP_DST_SEL_TC_L2,
						   EOP_DATA_SEL_GDS,
						   va, EOP_DATA_GDS(0, 1), 0);

			cmd_buffer->state.active_pipeline_gds_queries--;
		}
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, (va + 16));
		radeon_emit(cs, (va + 16) >> 32);
		break;
	default:
		unreachable("ending unhandled query type");
	}

	cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_INV_L2 |
					       RADV_CMD_FLAG_INV_VCACHE;
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						       RADV_CMD_FLAG_FLUSH_AND_INV_DB;
	}
}
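
/* Note: active_query_flush_bits is not applied immediately; it is OR'ed into
 * state.flush_bits by the next vkCmdResetQueryPool so that in-flight query
 * writes finish before the pool memory is cleared. */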
void radv_CmdBeginQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	va += pool->stride * query;

	emit_begin_query(cmd_buffer, pool, va, pool->type, flags, index);
}
void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}
void radv_CmdEndQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Do not need to add the pool BO to the list because the query must
	 * currently be active, which means the BO is already in the list.
	 */
	emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, index);

	/*
	 * For multiview we have to emit a query for each bit in the mask,
	 * however the first query we emit will get the totals for all the
	 * operations, so we don't want to get a real value in the other
	 * queries. This emits a fake begin/end sequence so the waiting
	 * code gets a completed query value and doesn't hang, but the
	 * query returns 0.
	 */
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
		uint64_t avail_va = va + pool->availability_offset + 4 * query;

		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
			va += pool->stride;
			avail_va += 4;

			emit_begin_query(cmd_buffer, pool, va, pool->type, 0, 0);
			emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, 0);
		}
	}
}
void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}
void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t query_va = va + pool->stride * query;

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	int num_queries = 1;
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
		num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

	ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
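
	/* One timestamp is written per active view so multiview consumers see
	 * a result in every query slot. TOP_OF_PIPE uses a ME COPY_DATA from
	 * the timestamp counter, which does not wait for earlier work; every
	 * other stage uses a bottom-of-pipe EOP event. */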
	for (unsigned i = 0; i < num_queries; i++) {
		switch(pipelineStage) {
		case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
					COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
					COPY_DATA_DST_SEL(V_370_MEM));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, query_va);
			radeon_emit(cs, query_va >> 32);
			break;
		default:
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   mec,
						   V_028A90_BOTTOM_OF_PIPE_TS, 0,
						   EOP_DST_SEL_MEM,
						   EOP_DATA_SEL_TIMESTAMP,
						   query_va, 0,
						   cmd_buffer->gfx9_eop_bug_va);
			break;
		}
		query_va += pool->stride;
	}
	cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					       RADV_CMD_FLAG_INV_L2 |
					       RADV_CMD_FLAG_INV_VCACHE;
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						       RADV_CMD_FLAG_FLUSH_AND_INV_DB;
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}