/*
 * Copyright © 2016 Red Hat Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"

#define TIMESTAMP_NOT_READY UINT64_MAX

static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
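/* pipelinestat_block_size is the size of one block of pipeline-statistics
 * counters as sampled by the GPU: 11 counters of 8 bytes each; each query
 * slot holds two such blocks (begin and end). pipeline_statistics_indices[i]
 * is, roughly, the position within a block of the counter for bit i of
 * VkQueryPipelineStatisticFlagBits; the hardware ordering differs from the
 * Vulkan bit order, so every access below goes through this table. */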
static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
	return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
}
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
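/* Roughly, radv_break_on_count() appends the following at the current
 * builder cursor:
 *
 *	if (counter >= count)
 *		break;
 *	counter = counter + 1;
 *
 * which is how the copy shaders below bound their per-RB and per-element
 * loops. */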
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader,
								nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}
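/* Push-constant layout shared by all of the query copy shaders; the 16-byte
 * range matches the VkPushConstantRange declared further down. Offsets
 * follow from the radv_load_push_int() calls below:
 *
 *	offset  0: uint32_t flags         (VkQueryResultFlags)
 *	offset  4: uint32_t dst_stride    ("output_stride")
 *	offset  8: uint32_t stats_mask    (pipeline statistics only)
 *	offset 12: uint32_t avail_offset  (pipeline statistics only)
 */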
static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result = 0;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	bool available = true;
	 * 	for (int i = 0; i < db_count; ++i) {
	 * 		if (enabled_rb_mask & (1 << i)) {
	 * 			uint64_t start = src_buf[src_offset + 16 * i];
	 * 			uint64_t end = src_buf[src_offset + 16 * i + 8];
	 * 			if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 * 				result += end - start;
	 * 			else
	 * 				available = false;
	 * 		}
	 * 	}
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT)
	 * 			dst_buf[dst_offset] = result;
	 * 		else
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
	unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
	unsigned db_count = device->physical_device->rad_info.num_render_backends;

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *enabled_cond =
		nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
			 nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));

	nir_if *enabled_if = nir_if_create(b.shader);
	enabled_if->condition = nir_src_for_ssa(nir_i2b(&b, enabled_cond));
	nir_cf_node_insert(b.cursor, &enabled_if->cf_node);

	b.cursor = nir_after_cf_list(&enabled_if->then_list);

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_intrinsic_set_align(load, 16, 0);
	nir_builder_instr_insert(&b, &load->instr);

	nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
	nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);

	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
		      nir_iadd(&b, nir_load_var(&b, result),
			       nir_isub(&b, nir_load_var(&b, end),
					nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 8, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}
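/* Source layout consumed above, as implied by the loads: each occlusion
 * query slot holds one 16-byte {start, end} pair of Z-pass counts per
 * render backend, and bit 63 of each value flags that the hardware has
 * actually written it, which is why both this shader and the CPU path test
 * (1ull << 63) before trusting a pair. */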
static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * 	uint32_t stats_mask;
	 * 	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_base = dst_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_base;
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	uint32_t elem_count = stats_mask >> 16;
	 * 	uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_count * elem_size] = available32;
	 * 	}
	 * 	if ((bool)available32) {
	 * 		// repeat 11 times:
	 * 		if (stats_mask & (1 << 0)) {
	 * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 * 			uint64_t result = end - start;
	 * 			if (flags & VK_QUERY_RESULT_64_BIT)
	 * 				dst_buf[dst_offset] = result;
	 * 			else
	 * 				dst_buf[dst_offset] = (uint32_t)result;
	 * 			dst_offset += elem_size;
	 * 		}
	 * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 * 		// Set everything to 0 as we don't know what is valid.
	 * 		for (int i = 0; i < elem_count; ++i)
	 * 			dst_buf[dst_base + elem_size * i] = 0;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
				nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_intrinsic_set_align(load, 4, 0);
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available32 = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available32);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_intrinsic_set_align(load, 8, 0);
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
							nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_intrinsic_set_align(load, 8, 0);
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		nir_intrinsic_set_align(store, 8, 0);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		nir_intrinsic_set_align(store, 4, 0);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
			      nir_iadd(&b, nir_load_var(&b, output_offset),
				       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}

	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
					    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 8, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);

	return b.shader;
}
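/* stats_mask packing: radv_query_shader() below keeps the low 11 bits of
 * VkQueryPipelineStatisticFlags and ORs the popcount of those bits into the
 * upper 16 bits before dispatch, which is what this shader reads back as
 * elem_count = stats_mask >> 16. */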
static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
	/* the shader this builds is roughly
	 *
	 * uint32_t src_stride = 32;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result[2] = {};
	 * 	bool available = false;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	uint64_t *src_data = src_buf[src_offset];
	 * 	uint32_t avail = (src_data[0] >> 32) &
	 * 			 (src_data[1] >> 32) &
	 * 			 (src_data[2] >> 32) &
	 * 			 (src_data[3] >> 32);
	 * 	if (avail & 0x80000000) {
	 * 		result[0] = src_data[3] - src_data[1];
	 * 		result[1] = src_data[2] - src_data[0];
	 * 		available = true;
	 * 	}
	 * 	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT) {
	 * 			dst_buf[dst_offset] = result;
	 * 		} else {
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 		}
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + result_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "tfb_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	/* Create and initialize local variables. */
	nir_variable *result =
		nir_local_variable_create(b.impl,
					  glsl_vector_type(GLSL_TYPE_UINT64, 2),
					  "result");
	nir_variable *available =
		nir_local_variable_create(b.impl, glsl_bool_type(), "available");

	nir_store_var(&b, result,
		      nir_vec2(&b, nir_imm_int64(&b, 0),
			       nir_imm_int64(&b, 0)), 0x3);
	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	/* Load resources. */
	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	/* Compute global ID. */
	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	/* Compute src/dst strides. */
	nir_ssa_def *input_stride = nir_imm_int(&b, 32);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	/* Load data from the query pool. */
	nir_intrinsic_instr *load1 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load1->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load1->src[1] = nir_src_for_ssa(input_base);
	nir_ssa_dest_init(&load1->instr, &load1->dest, 4, 32, NULL);
	load1->num_components = 4;
	nir_intrinsic_set_align(load1, 32, 0);
	nir_builder_instr_insert(&b, &load1->instr);

	nir_intrinsic_instr *load2 = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load2->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load2->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base, nir_imm_int(&b, 16)));
	nir_ssa_dest_init(&load2->instr, &load2->dest, 4, 32, NULL);
	load2->num_components = 4;
	nir_intrinsic_set_align(load2, 16, 0);
	nir_builder_instr_insert(&b, &load2->instr);

	/* Check if result is available. */
	nir_ssa_def *avails[2];
	avails[0] = nir_iand(&b, nir_channel(&b, &load1->dest.ssa, 1),
			     nir_channel(&b, &load1->dest.ssa, 3));
	avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
			     nir_channel(&b, &load2->dest.ssa, 3));
	nir_ssa_def *result_is_available =
		nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
				     nir_imm_int(&b, 0x80000000)));

	/* Only compute result if available. */
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(result_is_available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_ssa_def *packed64[4];
	packed64[0] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 0),
						    nir_channel(&b, &load1->dest.ssa, 1)));
	packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load1->dest.ssa, 2),
						    nir_channel(&b, &load1->dest.ssa, 3)));
	packed64[2] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 0),
						    nir_channel(&b, &load2->dest.ssa, 1)));
	packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b,
						    nir_channel(&b, &load2->dest.ssa, 2),
						    nir_channel(&b, &load2->dest.ssa, 3)));

	/* Compute result. */
	nir_ssa_def *num_primitive_written =
		nir_isub(&b, packed64[3], packed64[1]);
	nir_ssa_def *primitive_storage_needed =
		nir_isub(&b, packed64[2], packed64[0]);

	nir_store_var(&b, result,
		      nir_vec2(&b, num_primitive_written,
			       primitive_storage_needed), 0x3);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	b.cursor = nir_after_cf_node(&available_if->cf_node);

	/* Determine if result is 64 or 32 bit. */
	nir_ssa_def *result_is_64bit =
		nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size =
		nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
			  nir_imm_int(&b, 8));

	/* Store the result if complete or partial results have been requested. */
	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition =
		nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
					nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	nir_intrinsic_set_align(store, 8, 0);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x3);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 2;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */
	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition =
		nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	return b.shader;
}
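/* Source layout, per the SAMPLE_STREAMOUTSTATS note in
 * radv_GetQueryPoolResults() below: each transform-feedback query slot is
 * four u64 values (a begin and an end sample of two counters each), and the
 * GPU sets the top bit of each value's high dword once the write has
 * landed, so ANDing the four high dwords and testing 0x80000000 checks all
 * four writes at once. */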
static nir_shader *
build_timestamp_query_shader(struct radv_device *device)
{
	/* the shader this builds is roughly
	 *
	 * uint32_t src_stride = 8;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result = 0;
	 * 	bool available = false;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	uint64_t timestamp = src_buf[src_offset];
	 * 	if (timestamp != TIMESTAMP_NOT_READY) {
	 * 		result = timestamp;
	 * 		available = true;
	 * 	}
	 * 	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT) {
	 * 			dst_buf[dst_offset] = result;
	 * 		} else {
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 		}
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + result_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "timestamp_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	/* Create and initialize local variables. */
	nir_variable *result =
		nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *available =
		nir_local_variable_create(b.impl, glsl_bool_type(), "available");

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_false(&b), 0x1);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	/* Load resources. */
	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	dst_buf->num_components = 1;
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
								  nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_buf->num_components = 1;
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	/* Compute global ID. */
	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	/* Compute src/dst strides. */
	nir_ssa_def *input_stride = nir_imm_int(&b, 8);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	/* Load data from the query pool. */
	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(input_base);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
	load->num_components = 2;
	nir_intrinsic_set_align(load, 8, 0);
	nir_builder_instr_insert(&b, &load->instr);

	/* Pack the timestamp. */
	nir_ssa_def *timestamp;
	timestamp = nir_pack_64_2x32(&b, nir_vec2(&b,
						  nir_channel(&b, &load->dest.ssa, 0),
						  nir_channel(&b, &load->dest.ssa, 1)));

	/* Check if result is available. */
	nir_ssa_def *result_is_available =
		nir_i2b(&b, nir_ine(&b, timestamp,
				    nir_imm_int64(&b, TIMESTAMP_NOT_READY)));

	/* Only store result if available. */
	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(result_is_available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, result, timestamp, 0x1);
	nir_store_var(&b, available, nir_imm_true(&b), 0x1);

	b.cursor = nir_after_cf_node(&available_if->cf_node);

	/* Determine if result is 64 or 32 bit. */
	nir_ssa_def *result_is_64bit =
		nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
	nir_ssa_def *result_size =
		nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8),
			  nir_imm_int(&b, 4));

	/* Store the result if complete or partial results have been requested. */
	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition =
		nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
					nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 8, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */
	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition =
		nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	nir_intrinsic_set_align(store, 4, 0);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	return b.shader;
}
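/* TIMESTAMP_NOT_READY (~0ull) is the sentinel the query slot presumably
 * holds until the GPU writes the real timestamp; any other value is treated
 * as a valid, available result both here and in the CPU path in
 * radv_GetQueryPoolResults(). */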
static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
	struct radv_shader_module tfb_cs = { .nir = NULL };
	struct radv_shader_module timestamp_cs = { .nir = NULL };

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.query.pipeline_statistics_query_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}
	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
	tfb_cs.nir = build_tfb_query_shader(device);
	timestamp_cs.nir = build_timestamp_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&tfb_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo tfb_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = tfb_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &tfb_pipeline_info, NULL,
					     &device->meta_state.query.tfb_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo timestamp_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&timestamp_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo timestamp_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = timestamp_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &timestamp_pipeline_info, NULL,
					     &device->meta_state.query.timestamp_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	ralloc_free(tfb_cs.nir);
	ralloc_free(timestamp_cs.nir);
	mtx_unlock(&device->meta_state.mtx);
	return result;
}
VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_query_state_internal(device);
}

void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.tfb_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.tfb_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.timestamp_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.timestamp_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
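/* With on_demand set, pipeline creation is deferred: radv_query_shader()
 * below calls radv_device_init_meta_query_state_internal() the first time a
 * copy actually needs a pipeline, and the mtx_lock()/early-return dance in
 * that helper keeps the lazy path thread-safe. */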
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline *pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	bool old_predicating;

	if (!*pipeline) {
		VkResult ret = radv_device_init_meta_query_state_internal(device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_COMPUTE_PIPELINE |
		       RADV_META_SAVE_CONSTANTS |
		       RADV_META_SAVE_DESCRIPTORS);

	/* VK_EXT_conditional_rendering says that copy commands should not be
	 * affected by conditional rendering.
	 */
	old_predicating = cmd_buffer->state.predicating;
	cmd_buffer->state.predicating = false;

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });

	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;

	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 |
					RADV_CMD_FLAG_INV_VCACHE;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	/* Restore conditional rendering. */
	cmd_buffer->state.predicating = old_predicating;

	radv_meta_restore(&saved_state, cmd_buffer);
}
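/* avail_offset is rebased above (avail_offset -= src_offset) because the
 * shader reads the availability words through src_buf, which is bound
 * starting at src_offset rather than at the start of the pool BO. */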
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
		pool->stride = 32;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	pool->size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		pool->size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, pool->size,
					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
					     RADV_BO_PRIORITY_QUERY_POOL);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}
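/* Resulting BO layout: queryCount slots of pool->stride bytes each and, for
 * pipeline-statistics pools only, a trailing array of 4-byte availability
 * words starting at pool->availability_offset. */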
void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			available = *src64 != TIMESTAMP_NOT_READY;

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while (*src64 == TIMESTAMP_NOT_READY)
					;
				available = true;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t*)dest = *src64;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t*)dest = *(volatile uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint32_t db_count = device->physical_device->rad_info.num_render_backends;
			uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
			uint64_t sample_count = 0;
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;

				if (!(enabled_rb_mask & (1 << i)))
					continue;

				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			const volatile uint64_t *start = (uint64_t*)src;
			const volatile uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i) {
					if(pool->pipeline_stats_mask & (1u << i)) {
						if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
							*dst = stop[pipeline_statistics_indices[i]] -
							       start[pipeline_statistics_indices[i]];
						dst++;
					}
				}
			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i) {
					if(pool->pipeline_stats_mask & (1u << i)) {
						if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
							*dst = stop[pipeline_statistics_indices[i]] -
							       start[pipeline_statistics_indices[i]];
						dst++;
					}
				}
			}
			break;
		}
		case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t num_primitives_written;
			uint64_t primitive_storage_needed;

			/* SAMPLE_STREAMOUTSTATS stores this structure:
			 * {
			 *	u64 NumPrimitivesWritten;
			 *	u64 PrimitiveStorageNeeded;
			 * }
			 */
			available = 1;
			for (int j = 0; j < 4; j++) {
				if (!(src64[j] & 0x8000000000000000UL))
					available = 0;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
				result = VK_NOT_READY;

			num_primitives_written = src64[3] - src64[1];
			primitive_storage_needed = src64[2] - src64[0];

			if (flags & VK_QUERY_RESULT_64_BIT) {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t *)dest = num_primitives_written;
				dest += 8;
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint64_t *)dest = primitive_storage_needed;
				dest += 8;
			} else {
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t *)dest = num_primitives_written;
				dest += 4;
				if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
					*(uint32_t *)dest = primitive_storage_needed;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}
static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_query_pool *pool)
{
	if (cmd_buffer->pending_reset_query) {
		if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
			/* Only need to flush caches if the query pool size is
			 * large enough to be reset using the compute shader
			 * path. Small pools don't need any cache flushes
			 * because we use a CP dma clear.
			 */
			si_emit_cache_flush(cmd_buffer);
		}
	}
}

void radv_CmdCopyQueryPoolResults(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                stride,
    VkQueryResultFlags                          flags)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    uint64_t va = radv_buffer_get_va(pool->bo);
    uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
    dest_va += dst_buffer->offset + dstOffset;

    radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
    radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

    /* From the Vulkan spec 1.1.108:
     *
     * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
     *  previous uses of vkCmdResetQueryPool in the same queue, without any
     *  additional synchronization."
     *
     * So, we have to flush the caches if the compute shader path was used.
     */
    emit_query_flush(cmd_buffer, pool);

    switch (pool->type) {
    case VK_QUERY_TYPE_OCCLUSION:
        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
            for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
                unsigned query = firstQuery + i;
                uint64_t src_va = va + query * pool->stride + pool->stride - 4;

                radeon_check_space(cmd_buffer->device->ws, cs, 7);

                /* Waits on the upper word of the last DB entry */
                radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
                                 src_va, 0x80000000, 0xffffffff);
            }
        }
        radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
                          pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                          dst_buffer->offset + dstOffset,
                          pool->stride, stride,
                          queryCount, flags, 0, 0);
        break;
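
        /* Note on the wait above: the DB blocks set bit 63 of each 64-bit
         * ZPASS counter once it has been written, so testing the upper
         * dword against 0x80000000 with GREATER_OR_EQUAL is effectively a
         * wait on that "written" bit of the last entry. */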
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
            for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
                unsigned query = firstQuery + i;

                radeon_check_space(cmd_buffer->device->ws, cs, 7);

                uint64_t avail_va = va + pool->availability_offset + 4 * query;

                /* This waits on the ME. All copies below are done on the ME */
                radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
                                 avail_va, 1, 0xffffffff);
            }
        }
        radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
                          pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                          dst_buffer->offset + dstOffset,
                          pool->stride, stride, queryCount, flags,
                          pool->pipeline_stats_mask,
                          pool->availability_offset + 4 * firstQuery);
        break;
    case VK_QUERY_TYPE_TIMESTAMP:
        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
            for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
                unsigned query = firstQuery + i;
                uint64_t local_src_va = va + query * pool->stride;

                radeon_check_space(cmd_buffer->device->ws, cs, 7);

                /* Wait on the high 32 bits of the timestamp in case the
                 * low part is 0xffffffff: a valid timestamp may
                 * legitimately have 0xffffffff in its low dword, so we
                 * instead check that the high dword has left the
                 * TIMESTAMP_NOT_READY reset value.
                 */
                radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
                                 local_src_va + 4,
                                 TIMESTAMP_NOT_READY >> 32,
                                 0xffffffff);
            }
        }
        radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.timestamp_query_pipeline,
                          pool->bo, dst_buffer->bo,
                          firstQuery * pool->stride,
                          dst_buffer->offset + dstOffset,
                          pool->stride, stride,
                          queryCount, flags, 0, 0);
        break;
    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
            for (unsigned i = 0; i < queryCount; i++) {
                unsigned query = firstQuery + i;
                uint64_t src_va = va + query * pool->stride;

                radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

                /* Wait on the upper word of all results. */
                for (unsigned j = 0; j < 4; j++, src_va += 8) {
                    radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL,
                                     src_va + 4, 0x80000000,
                                     0xffffffff);
                }
            }
        }
        radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
                          pool->bo, dst_buffer->bo,
                          firstQuery * pool->stride,
                          dst_buffer->offset + dstOffset,
                          pool->stride, stride,
                          queryCount, flags, 0, 0);
        break;
    default:
        unreachable("trying to get results of unhandled query type");
    }
}
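
/* Usage sketch (illustrative, not part of this file): copying one 64-bit
 * occlusion result plus a 64-bit availability word per query into a buffer:
 *
 *     vkCmdCopyQueryPoolResults(cmd, pool, 0, queryCount, dst_buf, 0,
 *                               2 * sizeof(uint64_t),
 *                               VK_QUERY_RESULT_64_BIT |
 *                               VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
 */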

void radv_CmdResetQueryPool(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
                     ? TIMESTAMP_NOT_READY : 0;
    uint32_t flush_bits = 0;

    /* Make sure to sync all previous work if the given command buffer has
     * pending active queries. Otherwise the GPU might write queries data
     * after the reset operation.
     */
    cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

    flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
                                   firstQuery * pool->stride,
                                   queryCount * pool->stride, value);

    if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
                                       pool->availability_offset + firstQuery * 4,
                                       queryCount * 4, 0);
    }

    if (flush_bits) {
        /* Only need to flush caches for the compute shader path. */
        cmd_buffer->pending_reset_query = true;
        cmd_buffer->state.flush_bits |= flush_bits;
    }
}
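
/* radv_ResetQueryPoolEXT below is the host-side entry point from
 * VK_EXT_host_query_reset: the pool memory is mapped, so it is cleared
 * directly on the CPU instead of by recording GPU commands. */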

void radv_ResetQueryPoolEXT(
    VkDevice                                    _device,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount)
{
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);

    uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
                     ? TIMESTAMP_NOT_READY : 0;
    uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
    uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);

    for (uint32_t *p = data; p != data_end; ++p)
        *p = value;

    if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        memset(pool->ptr + pool->availability_offset + firstQuery * 4,
               0, queryCount * 4);
    }
}

static unsigned event_type_for_stream(unsigned stream)
{
    switch (stream) {
    default:
    case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
    case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
    case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
    case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
    }
}

static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
                             uint64_t va,
                             VkQueryType query_type,
                             VkQueryControlFlags flags,
                             uint32_t index)
{
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    switch (query_type) {
    case VK_QUERY_TYPE_OCCLUSION:
        radeon_check_space(cmd_buffer->device->ws, cs, 7);

        ++cmd_buffer->state.active_occlusion_queries;
        if (cmd_buffer->state.active_occlusion_queries == 1) {
            if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
                /* This is the first occlusion query, enable
                 * the hint if the precision bit is set.
                 */
                cmd_buffer->state.perfect_occlusion_queries_enabled = true;
            }

            radv_set_db_count_control(cmd_buffer);
        } else {
            if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
                !cmd_buffer->state.perfect_occlusion_queries_enabled) {
                /* This is not the first query, but this one
                 * needs to enable precision, DB_COUNT_CONTROL
                 * has to be updated accordingly.
                 */
                cmd_buffer->state.perfect_occlusion_queries_enabled = true;

                radv_set_db_count_control(cmd_buffer);
            }
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        break;
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
        radeon_check_space(cmd_buffer->device->ws, cs, 4);

        ++cmd_buffer->state.active_pipeline_queries;
        if (cmd_buffer->state.active_pipeline_queries == 1) {
            cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
            cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        break;
    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
        radeon_check_space(cmd_buffer->device->ws, cs, 4);

        assert(index < MAX_SO_STREAMS);

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        break;
    default:
        unreachable("beginning unhandled query type");
    }
}
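
/* Layout note: for occlusion queries the begin ZPASS_DONE above writes the
 * per-RB start counters at va, while emit_end_query() below writes the stop
 * counters at va + 8 of each 16-byte per-RB pair; bit 63 of each value
 * marks it as written. */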

static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
                           uint64_t va, uint64_t avail_va,
                           VkQueryType query_type, uint32_t index)
{
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    switch (query_type) {
    case VK_QUERY_TYPE_OCCLUSION:
        radeon_check_space(cmd_buffer->device->ws, cs, 14);

        cmd_buffer->state.active_occlusion_queries--;
        if (cmd_buffer->state.active_occlusion_queries == 0) {
            radv_set_db_count_control(cmd_buffer);

            /* Reset the perfect occlusion queries hint now that no
             * queries are active.
             */
            cmd_buffer->state.perfect_occlusion_queries_enabled = false;
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
        radeon_emit(cs, va + 8);
        radeon_emit(cs, (va + 8) >> 32);
        break;
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
        radeon_check_space(cmd_buffer->device->ws, cs, 16);

        cmd_buffer->state.active_pipeline_queries--;
        if (cmd_buffer->state.active_pipeline_queries == 0) {
            cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
            cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
        }
        va += pipelinestat_block_size;

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        si_cs_emit_write_event_eop(cs,
                                   cmd_buffer->device->physical_device->rad_info.chip_class,
                                   radv_cmd_buffer_uses_mec(cmd_buffer),
                                   V_028A90_BOTTOM_OF_PIPE_TS, 0,
                                   EOP_DST_SEL_MEM,
                                   EOP_DATA_SEL_VALUE_32BIT,
                                   avail_va, 1,
                                   cmd_buffer->gfx9_eop_bug_va);
        break;
    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
        radeon_check_space(cmd_buffer->device->ws, cs, 4);

        assert(index < MAX_SO_STREAMS);

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
        radeon_emit(cs, (va + 16));
        radeon_emit(cs, (va + 16) >> 32);
        break;
    default:
        unreachable("ending unhandled query type");
    }

    cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                           RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                           RADV_CMD_FLAG_INV_L2 |
                                           RADV_CMD_FLAG_INV_VCACHE;
    if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
        cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                               RADV_CMD_FLAG_FLUSH_AND_INV_DB;
    }
}
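
/* Note: active_query_flush_bits accumulates the flushes that a later
 * vkCmdResetQueryPool recorded in this command buffer must perform before
 * it may overwrite query memory; see the sync comment in
 * radv_CmdResetQueryPool. */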

void radv_CmdBeginQueryIndexedEXT(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags,
    uint32_t                                    index)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    uint64_t va = radv_buffer_get_va(pool->bo);

    radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

    emit_query_flush(cmd_buffer, pool);

    va += pool->stride * query;

    emit_begin_query(cmd_buffer, va, pool->type, flags, index);
}

void radv_CmdBeginQuery(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
    radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

void radv_CmdEndQueryIndexedEXT(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    uint32_t                                    index)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    uint64_t va = radv_buffer_get_va(pool->bo);
    uint64_t avail_va = va + pool->availability_offset + 4 * query;
    va += pool->stride * query;

    /* Do not need to add the pool BO to the list because the query must
     * currently be active, which means the BO is already in the list.
     */
    emit_end_query(cmd_buffer, va, avail_va, pool->type, index);

    /*
     * For multiview we have to emit a query for each bit in the mask,
     * however the first query we emit will get the totals for all the
     * operations, so we don't want to get a real value in the other
     * queries. This emits a fake begin/end sequence so the waiting
     * code gets a completed query value and doesn't hang, but the
     * query returns 0.
     */
    if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
        uint64_t avail_va = va + pool->availability_offset + 4 * query;

        for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
            va += pool->stride;
            avail_va += 4;

            emit_begin_query(cmd_buffer, va, pool->type, 0, 0);
            emit_end_query(cmd_buffer, va, avail_va, pool->type, 0);
        }
    }
}
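
/* Example: with view_mask = 0x3 (two views), util_bitcount() == 2, so the
 * loop above emits one extra begin/end pair at va + pool->stride. The fake
 * begin and end sample back to back, so that query completes immediately
 * with a value of 0, which is what any waiter expects. */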

void radv_CmdEndQuery(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
    radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}

void radv_CmdWriteTimestamp(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
    bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    uint64_t va = radv_buffer_get_va(pool->bo);
    uint64_t query_va = va + pool->stride * query;

    radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

    emit_query_flush(cmd_buffer, pool);

    int num_queries = 1;
    if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
        num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

    ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

    for (unsigned i = 0; i < num_queries; i++) {
        switch (pipelineStage) {
        case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
            radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
            radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
                            COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
                            COPY_DATA_DST_SEL(V_370_MEM));
            radeon_emit(cs, 0);
            radeon_emit(cs, 0);
            radeon_emit(cs, query_va);
            radeon_emit(cs, query_va >> 32);
            break;
        default:
            si_cs_emit_write_event_eop(cs,
                                       cmd_buffer->device->physical_device->rad_info.chip_class,
                                       mec,
                                       V_028A90_BOTTOM_OF_PIPE_TS, 0,
                                       EOP_DST_SEL_MEM,
                                       EOP_DATA_SEL_TIMESTAMP,
                                       query_va, 0,
                                       cmd_buffer->gfx9_eop_bug_va);
            break;
        }
        query_va += pool->stride;
    }

    cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                           RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                           RADV_CMD_FLAG_INV_L2 |
                                           RADV_CMD_FLAG_INV_VCACHE;
    if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
        cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                               RADV_CMD_FLAG_FLUSH_AND_INV_DB;
    }

    assert(cmd_buffer->cs->cdw <= cdw_max);
}
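
/* Usage sketch (illustrative, not part of this file): bracketing a workload
 * with two timestamps and converting the delta to nanoseconds:
 *
 *     vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, pool, 0);
 *     ... draws / dispatches ...
 *     vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, pool, 1);
 *
 * After both values are read back, (t1 - t0) multiplied by
 * VkPhysicalDeviceLimits::timestampPeriod gives elapsed GPU time in ns. */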