/*
 * Copyright © 2016 Red Hat Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"

#define TIMESTAMP_NOT_READY UINT64_MAX
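/* Note (added for clarity): timestamp slots are filled with this sentinel on
 * reset and become available once the GPU overwrites them with a real
 * timestamp; the WAIT_BIT paths below poll the value (or just its high dword)
 * against it.
 */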
static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
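/* Note (added for clarity): pipeline_statistics_indices maps a
 * VkQueryPipelineStatisticFlagBits bit position to the 64-bit counter slot
 * the hardware writes; e.g. Vulkan bit 0 is read from slot
 * pipeline_statistics_indices[0] == 7, i.e. byte offset 7 * 8 == 56 of a
 * pipelinestat block (see the uses below).
 */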
static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}
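/* Note (added for clarity): each render backend writes a (start, end) pair of
 * 64-bit occlusion counters, so one query slot occupies 16 bytes per DB; this
 * is where the 16 * db_count strides used below come from.
 */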
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
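/* Note (added for clarity): at the current cursor this helper emits the NIR
 * equivalent of
 *
 *	if (var >= count)
 *		break;
 *	var = var + 1;
 *
 * so a caller can open a nir_loop, call radv_break_on_count() once, and get a
 * bounded counting loop.
 */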
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}
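/* Illustrative sketch (added; it matches the offsets used by the shaders and
 * by radv_query_shader() below): the 16 bytes of push constants are laid out
 * as
 *
 *	struct {
 *		uint32_t flags;        // radv_load_push_int(b, 0, ...)
 *		uint32_t dst_stride;   // radv_load_push_int(b, 4, ...)
 *		uint32_t stats_mask;   // radv_load_push_int(b, 8, ...)
 *		uint32_t avail_offset; // radv_load_push_int(b, 12, ...)
 *	};
 */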
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *	uint32_t flags;
	 *	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t result = 0;
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_offset = dst_stride * global_id.x;
	 *	bool available = true;
	 *	for (int i = 0; i < db_count; ++i) {
	 *		uint64_t start = src_buf[src_offset + 16 * i];
	 *		uint64_t end = src_buf[src_offset + 16 * i + 8];
	 *		if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 *			result += end - start;
	 *		else
	 *			available = false;
	 *	}
	 *
	 *	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *		if (flags & VK_QUERY_RESULT_64_BIT)
	 *			dst_buf[dst_offset] = result;
	 *		else
	 *			dst_buf[dst_offset] = (uint32_t)result;
	 *	}
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + elem_size] = available;
	 *	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
	nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);

	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
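	/* Note (added for clarity): bit 63 doubles as the "counter written"
	 * flag, so loading each half as a signed 64-bit value and testing
	 * `value < 0` (nir_ilt against 0) checks exactly that bit.
	 */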
	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
		      nir_iadd(&b, nir_load_var(&b, result),
			       nir_isub(&b, nir_load_var(&b, end),
					nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
						nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size = nir_b32csel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}
static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *	uint32_t flags;
	 *	uint32_t dst_stride;
	 *	uint32_t stats_mask;
	 *	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_base = dst_stride * global_id.x;
	 *	uint64_t dst_offset = dst_base;
	 *	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *	uint32_t elem_count = stats_mask >> 16;
	 *	uint32_t available = src_buf[avail_offset + 4 * global_id.x];
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + elem_count * elem_size] = available;
	 *	}
	 *	if (available) {
	 *		// repeat 11 times:
	 *		if (stats_mask & (1 << 0)) {
	 *			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 *			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 *			uint64_t result = end - start;
	 *			if (flags & VK_QUERY_RESULT_64_BIT)
	 *				dst_buf[dst_offset] = result;
	 *			else
	 *				dst_buf[dst_offset] = (uint32_t)result;
	 *			dst_offset += elem_size;
	 *		}
	 *	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 *		// Set everything to 0 as we don't know what is valid.
	 *		for (int i = 0; i < elem_count; ++i)
	 *			dst_buf[dst_base + elem_size * i] = 0;
	 *	}
	 * }
	 */
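	/* Worked example (added): with three statistics enabled the host sends
	 * stats_mask as (bits | 3 << 16), so elem_count == 3 and the shader
	 * writes three elem_size results plus the optional availability word.
	 */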
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
				nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
						nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *elem_size = nir_b32csel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
			      nir_iadd(&b, nir_load_var(&b, output_offset),
				       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}

	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
							   nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
					    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);

	return b.shader;
}
static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
	/* the shader this builds is roughly
	 *
	 * uint32_t src_stride = 32;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *	uint64_t result[2] = {};
	 *	bool available = false;
	 *	uint64_t src_offset = src_stride * global_id.x;
	 *	uint64_t dst_offset = dst_stride * global_id.x;
	 *	uint64_t *src_data = src_buf[src_offset];
	 *	uint32_t avail = (src_data[0] >> 32) &
	 *			 (src_data[1] >> 32) &
	 *			 (src_data[2] >> 32) &
	 *			 (src_data[3] >> 32);
	 *	if (avail & 0x80000000) {
	 *		result[0] = src_data[3] - src_data[1];
	 *		result[1] = src_data[2] - src_data[0];
	 *		available = true;
	 *	}
	 *	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
	 *	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *		if (flags & VK_QUERY_RESULT_64_BIT) {
	 *			dst_buf[dst_offset] = result;
	 *		} else {
	 *			dst_buf[dst_offset] = (uint32_t)result;
	 *		}
	 *	}
	 *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *		dst_buf[dst_offset + result_size] = available;
	 *	}
	 * }
	 */
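	/* Note (added for clarity): the hardware sets the top bit of each
	 * 64-bit counter once it has been written, which is why the pseudocode
	 * above ANDs the four high dwords together and tests 0x80000000 to
	 * decide availability.
	 */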
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	/* Create and initialize local variables. */
	nir_variable *result =
		nir_local_variable_create(b.impl,
					  glsl_vector_type(GLSL_TYPE_UINT64, 2),
					  "result");
	nir_variable *available =
		nir_local_variable_create(b.impl, glsl_int_type(), "available");

	nir_store_var(&b, result,
		      nir_vec2(&b, nir_imm_int64(&b, 0),
			       nir_imm_int64(&b, 0)), 0x3);
	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	/* Load resources. */
	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	/* Compute global ID. */
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	/* Compute src/dst strides. */
	nir_ssa_def *input_stride = nir_imm_int(&b, 32);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	/* Load data from the query pool. */
	nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load1->src[1] = nir_src_for_ssa(input_base);
	nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
	load1->num_components = 4;
	nir_builder_instr_insert(&b, &load1->instr);

	nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
	nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
	load2->num_components = 4;
	nir_builder_instr_insert(&b, &load2->instr);

	/* Check if result is available. */
	nir_ssa_def *avails[2];
	avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
			     nir_channel(&b, &load1->dest.ssa, 3));
	avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
			     nir_channel(&b, &load2->dest.ssa, 3));
	nir_ssa_def *result_is_available =
		nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
			 nir_imm_int(&b, 0x80000000));

	/* Only compute result if available. */
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(result_is_available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_ssa_def *packed64[4];
	packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 0),
						    nir_channel(&b, &load1->dest.ssa, 1)));
	packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 2),
						    nir_channel(&b, &load1->dest.ssa, 3)));
	packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 0),
						    nir_channel(&b, &load2->dest.ssa, 1)));
	packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 2),
						    nir_channel(&b, &load2->dest.ssa, 3)));

	/* Compute result. */
	nir_ssa_def *num_primitive_written =
		nir_isub(&b, packed64[3], packed64[1]);
	nir_ssa_def *primitive_storage_needed =
		nir_isub(&b, packed64[2], packed64[0]);

	nir_store_var(&b, result,
		      nir_vec2(&b, num_primitive_written,
			       primitive_storage_needed), 0x3);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);

	b.cursor = nir_after_cf_node(&available_if->cf_node);

	/* Determine if result is 64 or 32 bit. */
	nir_ssa_def *result_is_64bit =
		nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size =
		nir_b32csel(&b, result_is_64bit, nir_imm_int(&b, 16),
			    nir_imm_int(&b, 8));

	/* Store the result if complete or partial results have been requested. */
	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition =
		nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags,
						     nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)),
					nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */
	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition =
		nir_src_for_ssa(nir_iand(&b, flags,
					 nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	return b.shader;
}
static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
	struct radv_shader_module tfb_cs = { .nir = NULL };

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.query.pipeline_statistics_query_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
	tfb_cs.nir = build_tfb_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&tfb_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo tfb_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = tfb_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &tfb_pipeline_info, NULL,
					     &device->meta_state.query.tfb_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	ralloc_free(tfb_cs.nir);
	mtx_unlock(&device->meta_state.mtx);
	return result;
}
VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_query_state_internal(device);
}
void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.tfb_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.tfb_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline *pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	bool old_predicating;

	if (!*pipeline) {
		VkResult ret = radv_device_init_meta_query_state_internal(device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_COMPUTE_PIPELINE |
		       RADV_META_SAVE_CONSTANTS |
		       RADV_META_SAVE_DESCRIPTORS);

	/* VK_EXT_conditional_rendering says that copy commands should not be
	 * affected by conditional rendering.
	 */
	old_predicating = cmd_buffer->state.predicating;
	cmd_buffer->state.predicating = false;

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });

	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
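	/* Worked example (added): a mask with bits 0, 1 and 4 set (0x13) has
	 * util_bitcount() == 3 and is sent as 0x30013, so the shader recovers
	 * elem_count = 3 from the top 16 bits.
	 */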
	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	/* Restore conditional rendering. */
	cmd_buffer->state.predicating = old_predicating;

	radv_meta_restore(&saved_state, cmd_buffer);
}
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
				 ? TIMESTAMP_NOT_READY : 0;

	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		pool->stride = 32;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	pool->size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		pool->size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, pool->size,
					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
	memset(pool->ptr, initial_value, pool->size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}
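/* Note (added for clarity): the resulting BO layout is queryCount slots of
 * pool->stride bytes, followed, for pipeline statistics pools only, by one
 * 32-bit availability word per query starting at pool->availability_offset.
 */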
void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			available = *(uint64_t *)src != TIMESTAMP_NOT_READY;

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
					;
				available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;
				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							 start[pipeline_statistics_indices[i]];
			}
			break;
		}
		case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t num_primitives_written;
			uint64_t primitive_storage_needed;

			/* SAMPLE_STREAMOUTSTATS stores this structure:
			 * {
			 *	u64 NumPrimitivesWritten;
			 *	u64 PrimitiveStorageNeeded;
			 * }
			 */
			available = 1;
			for (int j = 0; j < 4; j++) {
				if (!(src64[j] & 0x8000000000000000UL))
					available = 0;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			num_primitives_written = src64[3] - src64[1];
			primitive_storage_needed = src64[2] - src64[0];

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t *)dest = num_primitives_written;
				dest += 8;
				*(uint64_t *)dest = primitive_storage_needed;
				dest += 8;
			} else {
				*(uint32_t *)dest = num_primitives_written;
				dest += 4;
				*(uint32_t *)dest = primitive_storage_needed;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				/* Waits on the upper word of the last DB entry */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
						 src_va, 0x80000000, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
						 avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* Wait on the high 32 bits of the timestamp in
				 * case the low part is 0xffffffff.
				 */
				radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
						 local_src_va + 4,
						 TIMESTAMP_NOT_READY >> 32,
						 0xffffffff);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
				radeon_emit(cs, local_src_va);
				radeon_emit(cs, local_src_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; i++) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride;

				radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

				/* Wait on the upper word of all results. */
				for (unsigned j = 0; j < 4; j++, src_va += 8) {
					radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
							 src_va + 4, 0x80000000,
							 0xffffffff);
				}
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
				  pool->bo, dst_buffer->bo,
				  firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pool->stride, stride,
				  queryCount, flags, 0, 0);
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}
void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? TIMESTAMP_NOT_READY : 0;
	uint32_t flush_bits = 0;

	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
				       firstQuery * pool->stride,
				       queryCount * pool->stride, value);

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
					       pool->availability_offset + firstQuery * 4,
					       queryCount * 4, 0);
	}

	if (flush_bits) {
		/* Only need to flush caches for the compute shader path. */
		cmd_buffer->pending_reset_query = true;
		cmd_buffer->state.flush_bits |= flush_bits;
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_query_pool *pool)
{
	if (cmd_buffer->pending_reset_query) {
		if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
			/* Only need to flush caches if the query pool size is
			 * large enough to be reset using the compute shader
			 * path. Small pools don't need any cache flushes
			 * because we use a CP dma clear.
			 */
			si_emit_cache_flush(cmd_buffer);
		}
	}
}
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
			     uint64_t va,
			     VkQueryType query_type,
			     VkQueryControlFlags flags,
			     uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1) {
			if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
				/* This is the first occlusion query, enable
				 * the hint if the precision bit is set.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;
			}

			radv_set_db_count_control(cmd_buffer);
		} else {
			if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
			    !cmd_buffer->state.perfect_occlusion_queries_enabled) {
				/* This is not the first query, but this one
				 * needs to enable precision, DB_COUNT_CONTROL
				 * has to be updated accordingly.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;

				radv_set_db_count_control(cmd_buffer);
			}
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		++cmd_buffer->state.active_pipeline_queries;
		if (cmd_buffer->state.active_pipeline_queries == 1) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t va, uint64_t avail_va,
			   VkQueryType query_type, uint32_t index)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0) {
			radv_set_db_count_control(cmd_buffer);

			/* Reset the perfect occlusion queries hint now that no
			 * queries are active.
			 */
			cmd_buffer->state.perfect_occlusion_queries_enabled = false;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);

		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		cmd_buffer->state.active_pipeline_queries--;
		if (cmd_buffer->state.active_pipeline_queries == 0) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
		}
		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DATA_SEL_VALUE_32BIT,
					   avail_va, 1,
					   cmd_buffer->gfx9_eop_bug_va);
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		assert(index < MAX_SO_STREAMS);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
		radeon_emit(cs, (va + 16));
		radeon_emit(cs, (va + 16) >> 32);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}
void radv_CmdBeginQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	va += pool->stride * query;

	emit_begin_query(cmd_buffer, va, pool->type, flags, index);
}
void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}
void radv_CmdEndQueryIndexedEXT(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	uint32_t                                    index)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Do not need to add the pool BO to the list because the query must
	 * currently be active, which means the BO is already in the list.
	 */
	emit_end_query(cmd_buffer, va, avail_va, pool->type, index);

	/*
	 * For multiview we have to emit a query for each bit in the mask,
	 * however the first query we emit will get the totals for all the
	 * operations, so we don't want to get a real value in the other
	 * queries. This emits a fake begin/end sequence so the waiting
	 * code gets a completed query value and doesn't hang, but the
	 * query returns 0.
	 */
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
		uint64_t avail_va = va + pool->availability_offset + 4 * query;

		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
			va += pool->stride;
			avail_va += 4;
			emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
			emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
		}
	}
}
void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}
void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t query_va = va + pool->stride * query;

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	emit_query_flush(cmd_buffer, pool);

	int num_queries = 1;
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
		num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

	for (unsigned i = 0; i < num_queries; i++) {
		switch(pipelineStage) {
		case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
				    COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				    COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, query_va);
			radeon_emit(cs, query_va >> 32);
			break;
		default:
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   mec,
						   V_028A90_BOTTOM_OF_PIPE_TS, 0,
						   EOP_DATA_SEL_TIMESTAMP,
						   query_va, 0,
						   cmd_buffer->gfx9_eop_bug_va);
			break;
		}
		query_va += pool->stride;
	}
	assert(cmd_buffer->cs->cdw <= cdw_max);
}