/*
 * Copyright © 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"
static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
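/* Each SAMPLE_PIPELINESTAT event dumps eleven 64-bit counters in a fixed
 * hardware order. The table above maps each VkQueryPipelineStatisticFlagBits
 * bit position (bit 0 through bit 10) to the slot its counter occupies in
 * that block. */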
static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	if (device->physical_device->rad_info.chip_class == SI)
		num_db = MAX2(8, num_db);

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}
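/* Emits the NIR equivalent of
 *
 *    if (counter >= count)
 *       break;
 *    counter++;
 *
 * and leaves the builder cursor right after the if, so callers can keep
 * appending instructions to the enclosing loop body. */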
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
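/* Loads one 32-bit value from the 16-byte push constant block that
 * radv_query_shader() uploads (flags, dst_stride, ...), at the given byte
 * offset. */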
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *	uint32_t flags;
	 *	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t result = 0;
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_offset = dst_stride * global_id.x;
	 *	bool available = true;
	 *	for (int i = 0; i < db_count; ++i) {
	 *		uint64_t start = src_buf[src_offset + 16 * i];
	 *		uint64_t end = src_buf[src_offset + 16 * i + 8];
	 *		if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 *			result += end - start;
	 *		else
	 *			available = false;
	 *	}
	 *	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *		if (flags & VK_QUERY_RESULT_64_BIT)
	 *			dst_buf[dst_offset] = result;
	 *		else
	 *			dst_buf[dst_offset] = (uint32_t)result;
	 *	}
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + elem_size] = available;
	 *	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
	                                        b.shader->info.cs.local_size[0],
	                                        b.shader->info.cs.local_size[1],
	                                        b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);
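	/* Walk the begin/end sample-count pair that every DB wrote: each DB
	 * occupies 16 bytes (two 64-bit counts) and sets bit 63 of a count
	 * once it has landed. A pair with either bit 63 still clear makes
	 * the whole query unavailable. */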
	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	const unsigned swizzle0[] = {0,0,0,0};
	const unsigned swizzle1[] = {1,1,1,1};
	nir_store_var(&b, start, nir_swizzle(&b, &load->dest.ssa, swizzle0, 1, false), 0x1);
	nir_store_var(&b, end, nir_swizzle(&b, &load->dest.ssa, swizzle1, 1, false), 0x1);

	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
	              nir_iadd(&b, nir_load_var(&b, result),
	                       nir_isub(&b, nir_load_var(&b, end),
	                                nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);
	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);
	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}
static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *	uint32_t flags;
	 *	uint32_t dst_stride;
	 *	uint32_t stats_mask;
	 *	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_base = dst_stride * global_id.x;
	 *	uint64_t dst_offset = dst_base;
	 *	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *	uint32_t elem_count = stats_mask >> 16;
	 *	uint32_t available = src_buf[avail_offset + 4 * global_id.x];
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + elem_count * elem_size] = available;
	 *	}
	 *	if (available) {
	 *		// repeat 11 times:
	 *		if (stats_mask & (1 << 0)) {
	 *			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 *			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 *			uint64_t result = end - start;
	 *			if (flags & VK_QUERY_RESULT_64_BIT)
	 *				dst_buf[dst_offset] = result;
	 *			else
	 *				dst_buf[dst_offset] = (uint32_t)result;
	 *			dst_offset += elem_size;
	 *		}
	 *	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 *		// Set everything to 0 as we don't know what is valid.
	 *		for (int i = 0; i < elem_count; ++i)
	 *			dst_buf[dst_base + elem_size * i] = 0;
	 *	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;
	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
	                                        b.shader->info.cs.local_size[0],
	                                        b.shader->info.cs.local_size[1],
	                                        b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
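	/* The availability dwords live after all the query slots in the pool
	 * BO; the push constant gives their base and each query owns one
	 * 32-bit word. */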
	avail_offset = nir_iadd(&b, avail_offset,
	                        nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);
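	/* If the query is available, copy each selected counter out as
	 * end - start; otherwise, when partial results were requested, zero
	 * every output element instead (the else branch below). */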
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
		                                        nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
		                                        nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
		              nir_iadd(&b, nir_load_var(&b, output_offset),
		                       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}
	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
	                                                   nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
	                                    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);

	return b.shader;
}
VkResult radv_device_init_meta_query_state(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };

	zero(device->meta_state.query);

	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					  &occlusion_pl_create_info,
					  &device->meta_state.alloc,
					  &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;
	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	return result;
}
void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
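/* Common helper for the result-copy paths: binds one of the meta compute
 * pipelines built above, binds the query pool and destination buffer as
 * storage buffers, pushes flags/stride/stats-mask/avail-offset and launches
 * one invocation per query. */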
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_compute_state saved_state;

	radv_meta_save_compute(&saved_state, cmd_buffer, 4);
	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};
	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });
	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;

	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);
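	/* The invalidations here make the query data written by the GPU
	 * visible to the compute shader, and the flags set after the
	 * dispatch (including CS_PARTIAL_FLUSH) cover the shader's own
	 * writes. */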
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

	radv_meta_restore_compute(&saved_state, cmd_buffer, 4);
}
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint64_t size;
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return VK_ERROR_OUT_OF_HOST_MEMORY;
	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP ||
	    pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		size += 4 * pCreateInfo->queryCount;
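	/* BO layout: queryCount fixed-stride query slots, then one 32-bit
	 * availability word per query for the types that cannot infer
	 * availability from the results themselves. */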
	pool->bo = device->ws->buffer_create(device->ws, size,
					     64, RADEON_DOMAIN_GTT, 0);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	memset(pool->ptr, 0, size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}
void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		if (pool->type != VK_QUERY_TYPE_OCCLUSION) {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
		}
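		/* Occlusion queries carry availability in bit 63 of each
		 * ZPASS counter instead of a separate dword; it is computed
		 * in the case below. */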
		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;
				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				/* Waits on the upper word of the last DB entry */
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
				radeon_emit(cs, 5 | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, src_va);
				radeon_emit(cs, src_va >> 32);
				radeon_emit(cs, 0x80000000); /* reference value */
				radeon_emit(cs, 0xffffffff); /* mask */
				radeon_emit(cs, 4); /* poll interval */
			}
		}
		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  get_max_db(cmd_buffer->device) * 16, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pipelinestat_block_size * 2, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* TODO, not sure if there is any case where we won't always be ready yet */
				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_va = va + pool->availability_offset + 4 * query;
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_MEM));
				radeon_emit(cs, avail_va);
				radeon_emit(cs, avail_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}
void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);

	si_cp_dma_clear_buffer(cmd_buffer, va + firstQuery * pool->stride,
			       queryCount * pool->stride, 0);
	if (pool->type == VK_QUERY_TYPE_TIMESTAMP ||
	    pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		si_cp_dma_clear_buffer(cmd_buffer, va + pool->availability_offset + firstQuery * 4,
				       queryCount * 4, 0);
}
void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   false,
					   EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					   1, avail_va, 0, 1);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}
void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	uint64_t query_va = va + pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28);
	switch(pipelineStage) {
	case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
				COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, query_va);
		radeon_emit(cs, query_va >> 32);

		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_ME));
		radeon_emit(cs, avail_va);
		radeon_emit(cs, avail_va >> 32);
		radeon_emit(cs, 1);
		break;
	default:
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   mec,
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   3, query_va, 0, 0);
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   mec,
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   1, avail_va, 0, 1);
		break;
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}