/*
 * Copyright 2016 Red Hat Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"
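
/* Query-pool support: CPU readback (GetQueryPoolResults), GPU-side resolve of
 * occlusion results via a meta compute shader, and timestamp copies done on
 * the CP. Occlusion query memory holds one (start, end) counter pair per
 * render backend (DB), 16 bytes each. */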
static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	if (device->physical_device->rad_info.chip_class == SI)
		num_db = MAX2(8, num_db);

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}
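
/* Helper for the shader builder below: emits, at the current cursor,
 *   if (var >= count) break;
 *   var++;
 * i.e. a bounded loop counter for the per-DB accumulation loop. */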
static void radv_break_on_count(nir_builder *b, nir_variable *var, int count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, nir_imm_int(b, count)));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
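
/* Loads a dword from the push-constant block; offset 0 is the
 * VkQueryResultFlags word, offset 4 the destination stride (see the 8-byte
 * push-constant range in the pipeline layout created below). */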
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}

static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 *     uint32_t flags;
	 *     uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 *     uint64_t result = 0;
	 *     uint64_t src_offset = src_stride * global_id.x;
	 *     uint64_t dst_offset = dst_stride * global_id.x;
	 *     bool available = true;
	 *     for (int i = 0; i < db_count; ++i) {
	 *         uint64_t start = src_buf[src_offset + 16 * i];
	 *         uint64_t end = src_buf[src_offset + 16 * i + 8];
	 *         if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 *             result += end - start;
	 *         else
	 *             available = false;
	 *     }
	 *
	 *     uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 *     if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 *         if (flags & VK_QUERY_RESULT_64_BIT)
	 *             dst_buf[dst_offset] = result;
	 *         else
	 *             dst_buf[dst_offset] = (uint32_t)result;
	 *     }
	 *     if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 *         dst_buf[dst_offset + elem_size] = available;
	 *     }
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info->cs.local_size[0] = 64;
	b.shader->info->cs.local_size[1] = 1;
	b.shader->info->cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);
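
	/* Compute a linear thread id: one shader invocation resolves one query
	 * slot. */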
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
	                                        b.shader->info->cs.local_size[0],
	                                        b.shader->info->cs.local_size[1],
	                                        b.shader->info->cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
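
	/* Initialize the accumulator, the loop counter and the availability
	 * flag before entering the per-DB loop. */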
	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, db_count);

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	const unsigned swizzle0[] = {0,0,0,0};
	const unsigned swizzle1[] = {1,1,1,1};
	nir_store_var(&b, start, nir_swizzle(&b, &load->dest.ssa, swizzle0, 1, false), 0x1);
	nir_store_var(&b, end, nir_swizzle(&b, &load->dest.ssa, swizzle1, 1, false), 0x1);
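
	/* Bit 63 of each counter is the "value written" flag set by the
	 * hardware; testing value < 0 as a signed 64-bit integer checks that
	 * bit. */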
	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
	              nir_iadd(&b, nir_load_var(&b, result),
	                       nir_isub(&b, nir_load_var(&b, end),
	                                nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}
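
/* Creates the meta objects used by CmdCopyQueryPoolResults: a two-binding
 * descriptor set layout (dst buffer, src buffer), a pipeline layout with an
 * 8-byte push-constant range, and the occlusion resolve compute pipeline. */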
VkResult radv_device_init_meta_query_state(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };

	zero(device->meta_state.query);

	occlusion_cs.nir = build_occlusion_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 8},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	ralloc_free(occlusion_cs.nir);
	return VK_SUCCESS;
fail:
	radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	return result;
}

void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
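
/* Dispatches the occlusion resolve shader: one invocation per query, reading
 * count query slots starting at src_offset and writing results (and
 * optionally availability) to the destination buffer. */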
static void occlusion_query_shader(struct radv_cmd_buffer *cmd_buffer,
				   struct radeon_winsys_bo *src_bo,
				   struct radeon_winsys_bo *dst_bo,
				   uint64_t src_offset, uint64_t dst_offset,
				   uint32_t dst_stride, uint32_t count,
				   uint32_t flags)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_compute_state saved_state;
	unsigned stride = get_max_db(device) * 16;
	VkDescriptorSet ds;

	radv_meta_save_compute(&saved_state, cmd_buffer, 4);

	radv_temp_descriptor_set_create(device, cmd_buffer,
					device->meta_state.query.ds_layout,
					&ds);

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = stride * count
	};

	radv_UpdateDescriptorSets(radv_device_to_handle(device),
				  2,
				  (VkWriteDescriptorSet[]) {
					  {
						  .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						  .dstSet = ds,
						  .dstBinding = 0,
						  .dstArrayElement = 0,
						  .descriptorCount = 1,
						  .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						  .pBufferInfo = &(VkDescriptorBufferInfo) {
							  .buffer = radv_buffer_to_handle(&dst_buffer),
							  .offset = 0,
							  .range = VK_WHOLE_SIZE
						  }
					  },
					  {
						  .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						  .dstSet = ds,
						  .dstBinding = 1,
						  .dstArrayElement = 0,
						  .descriptorCount = 1,
						  .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						  .pBufferInfo = &(VkDescriptorBufferInfo) {
							  .buffer = radv_buffer_to_handle(&src_buffer),
							  .offset = 0,
							  .range = VK_WHOLE_SIZE
						  }
					  }
				  }, 0, NULL);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE,
			     device->meta_state.query.occlusion_query_pipeline);

	radv_CmdBindDescriptorSets(radv_cmd_buffer_to_handle(cmd_buffer),
				   VK_PIPELINE_BIND_POINT_COMPUTE,
				   device->meta_state.query.p_layout, 0, 1,
				   &ds, 0, NULL);

	struct {
		uint32_t flags;
		uint32_t dst_stride;
	} push_constants = {
		flags,
		dst_stride
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

	radv_temp_descriptor_set_destroy(device, ds);

	radv_meta_restore_compute(&saved_state, cmd_buffer, 4);
}
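
/* Pool BO layout (sketch): queryCount slots of pool->stride bytes, followed,
 * for timestamp pools only, by one 4-byte availability word per query:
 *
 *   [slot 0][slot 1]...[slot N-1][avail 0..N-1]
 *                                ^ availability_offset = stride * queryCount
 */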
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint64_t size;
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = 16 * 11;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP)
		size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, size,
					     64, RADEON_DOMAIN_GTT, 0);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	memset(pool->ptr, 0, size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			}

			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;

			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;
				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}
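
/* GPU-side result copy. Occlusion results are resolved with the compute
 * shader above (after an optional CP wait on the last DB pair); timestamps
 * are copied on the ME with CP COPY_DATA packets. */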
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				/* Waits on the upper word of the last DB entry */
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
				radeon_emit(cs, 5 | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, src_va);
				radeon_emit(cs, src_va >> 32);
				radeon_emit(cs, 0x80000000); /* reference value */
				radeon_emit(cs, 0xffffffff); /* mask */
				radeon_emit(cs, 4); /* poll interval */
			}
		}
		occlusion_query_shader(cmd_buffer, pool->bo, dst_buffer->bo,
				       firstQuery * pool->stride,
				       dst_buffer->offset + dstOffset, stride,
				       queryCount, flags);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				/* TODO, not sure if there is any case where we won't always be ready yet */
				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
				radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, avail_va);
				radeon_emit(cs, avail_va >> 32);
				radeon_emit(cs, 1); /* reference value */
				radeon_emit(cs, 0xffffffff); /* mask */
				radeon_emit(cs, 4); /* poll interval */
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_va = va + pool->availability_offset + 4 * query;
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_MEM));
				radeon_emit(cs, avail_va);
				radeon_emit(cs, avail_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}

void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);

	si_cp_dma_clear_buffer(cmd_buffer, va + firstQuery * pool->stride,
			       queryCount * pool->stride, 0);
	if (pool->type == VK_QUERY_TYPE_TIMESTAMP)
		si_cp_dma_clear_buffer(cmd_buffer, va + pool->availability_offset + firstQuery * 4,
				       queryCount * 4, 0);
}
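
/* ZPASS_DONE makes each DB write its current occlusion counter to the query
 * slot; begin writes the start values, end (below) writes the end values at
 * offset 8 within each (start, end) pair. */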
void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}

void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}
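
/* Writes a bottom-of-pipe timestamp into the query slot via an EOP event
 * (RELEASE_MEM on compute rings, EVENT_WRITE_EOP on graphics), then marks
 * the query available with a CP WRITE_DATA. */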
void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	uint64_t query_va = va + pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

	if (mec) {
		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, 3 << 29);
		radeon_emit(cs, query_va);
		radeon_emit(cs, query_va >> 32);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, query_va);
		radeon_emit(cs, (3 << 29) | ((query_va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	}

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(mec ? V_370_MEM_ASYNC : V_370_MEMORY_SYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, avail_va);
	radeon_emit(cs, avail_va >> 32);
	radeon_emit(cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}