static const int pipelinestat_block_size = 11 * 8;
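+ /* Remaps the VkQueryPipelineStatisticFlagBits bit order to the order in which the counters are stored in the query buffer. */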
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
-static unsigned get_max_db(struct radv_device *device)
-{
- unsigned num_db = device->physical_device->rad_info.num_render_backends;
- MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
-
- /* Otherwise we need to change the query reset procedure */
- assert(rb_mask == ((1ull << num_db) - 1));
-
- return num_db;
-}
-
-
static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
* uint64_t dst_offset = dst_stride * global_id.x;
* bool available = true;
* for (int i = 0; i < db_count; ++i) {
- * uint64_t start = src_buf[src_offset + 16 * i];
- * uint64_t end = src_buf[src_offset + 16 * i + 8];
- * if ((start & (1ull << 63)) && (end & (1ull << 63)))
- * result += end - start;
- * else
- * available = false;
+ * if (enabled_rb_mask & (1 << i)) {
+ * uint64_t start = src_buf[src_offset + 16 * i];
+ * uint64_t end = src_buf[src_offset + 16 * i + 8];
+ * if ((start & (1ull << 63)) && (end & (1ull << 63)))
+ * result += end - start;
+ * else
+ * available = false;
+ * }
* }
* uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
* if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
- unsigned db_count = get_max_db(device);
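+ /* The set of enabled RBs is not necessarily contiguous, so keep the
+ * mask around instead of relying on get_max_db()'s contiguity assumption.
+ */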
+ unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
+ unsigned db_count = device->physical_device->rad_info.num_render_backends;
nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ dst_buf->num_components = 1;
nir_intrinsic_set_desc_set(dst_buf, 0);
nir_intrinsic_set_binding(dst_buf, 0);
- nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &dst_buf->instr);
nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ src_buf->num_components = 1;
nir_intrinsic_set_desc_set(src_buf, 0);
nir_intrinsic_set_binding(src_buf, 1);
- nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &src_buf->instr);
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
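+ /* Skip render backends that are disabled; they never write results for this query. */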
+ nir_ssa_def *enabled_cond =
+ nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
+ nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));
+
+ nir_if *enabled_if = nir_if_create(b.shader);
+ enabled_if->condition = nir_src_for_ssa(nir_i2b(&b, enabled_cond));
+ nir_cf_node_insert(b.cursor, &enabled_if->cf_node);
+
+ b.cursor = nir_after_cf_list(&enabled_if->then_list);
+
nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
load_offset = nir_iadd(&b, input_base, load_offset);
nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ dst_buf->num_components = 1;
nir_intrinsic_set_desc_set(dst_buf, 0);
nir_intrinsic_set_binding(dst_buf, 0);
- nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &dst_buf->instr);
nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ src_buf->num_components = 1;
nir_intrinsic_set_desc_set(src_buf, 0);
nir_intrinsic_set_binding(src_buf, 1);
- nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &src_buf->instr);
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ dst_buf->num_components = 1;
nir_intrinsic_set_desc_set(dst_buf, 0);
nir_intrinsic_set_binding(dst_buf, 0);
- nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &dst_buf->instr);
nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
nir_intrinsic_vulkan_resource_index);
src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ src_buf->num_components = 1;
nir_intrinsic_set_desc_set(src_buf, 0);
nir_intrinsic_set_binding(src_buf, 1);
- nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
nir_builder_instr_insert(&b, &src_buf->instr);
/* Compute global ID. */
- nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
- nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
+ nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
+ nir_ssa_def *wg_id = nir_load_work_group_id(&b);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
b.shader->info.cs.local_size[0],
b.shader->info.cs.local_size[1],
VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
&push_constants);
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
- RADV_CMD_FLAG_INV_VMEM_L1;
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 |
+ RADV_CMD_FLAG_INV_VCACHE;
if (flags & VK_QUERY_RESULT_WAIT_BIT)
cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
- ? TIMESTAMP_NOT_READY : 0;
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
switch(pCreateInfo->queryType) {
case VK_QUERY_TYPE_OCCLUSION:
- pool->stride = 16 * get_max_db(device);
+ pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
pool->stride = pipelinestat_block_size * 2;
pool->size += 4 * pCreateInfo->queryCount;
pool->bo = device->ws->buffer_create(device->ws, pool->size,
- 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
+ RADV_BO_PRIORITY_QUERY_POOL);
if (!pool->bo) {
vk_free2(&device->alloc, pAllocator, pool);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
- memset(pool->ptr, initial_value, pool->size);
*pQueryPool = radv_query_pool_to_handle(pool);
return VK_SUCCESS;
char *src = pool->ptr + query * pool->stride;
uint32_t available;
- if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
- if (flags & VK_QUERY_RESULT_WAIT_BIT)
- while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
- ;
- available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
- }
-
switch (pool->type) {
case VK_QUERY_TYPE_TIMESTAMP: {
available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
}
- if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+ if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
result = VK_NOT_READY;
- break;
-
- }
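+ /* Keep advancing dest even when nothing is written so that following
+ * queries still end up at their expected offsets.
+ */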
if (flags & VK_QUERY_RESULT_64_BIT) {
- *(uint64_t*)dest = *(uint64_t*)src;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint64_t*)dest = *(uint64_t*)src;
dest += 8;
} else {
- *(uint32_t*)dest = *(uint32_t*)src;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint32_t*)dest = *(uint32_t*)src;
dest += 4;
}
break;
}
case VK_QUERY_TYPE_OCCLUSION: {
volatile uint64_t const *src64 = (volatile uint64_t const *)src;
+ uint32_t db_count = device->physical_device->rad_info.num_render_backends;
+ uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
uint64_t sample_count = 0;
- int db_count = get_max_db(device);
available = 1;
for (int i = 0; i < db_count; ++i) {
uint64_t start, end;
+
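+ /* Disabled render backends never write their fence bits, so skip them. */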
+ if (!(enabled_rb_mask & (1 << i)))
+ continue;
+
do {
start = src64[2 * i];
end = src64[2 * i + 1];
}
}
- if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+ if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
result = VK_NOT_READY;
- break;
-
- }
if (flags & VK_QUERY_RESULT_64_BIT) {
- *(uint64_t*)dest = sample_count;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint64_t*)dest = sample_count;
dest += 8;
} else {
- *(uint32_t*)dest = sample_count;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint32_t*)dest = sample_count;
dest += 4;
}
break;
}
case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
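+ /* The availability dword only exists for pipeline statistics pools,
+ * so wait on it and read it inside this case.
+ */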
- if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
- result = VK_NOT_READY;
- break;
- }
+ if (flags & VK_QUERY_RESULT_WAIT_BIT)
+ while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
+ ;
+ available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
+ if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ result = VK_NOT_READY;
const uint64_t *start = (uint64_t*)src;
const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
if (flags & VK_QUERY_RESULT_64_BIT) {
uint64_t *dst = (uint64_t*)dest;
dest += util_bitcount(pool->pipeline_stats_mask) * 8;
- for(int i = 0; i < 11; ++i)
- if(pool->pipeline_stats_mask & (1u << i))
- *dst++ = stop[pipeline_statistics_indices[i]] -
- start[pipeline_statistics_indices[i]];
+ for(int i = 0; i < 11; ++i) {
+ if(pool->pipeline_stats_mask & (1u << i)) {
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *dst = stop[pipeline_statistics_indices[i]] -
+ start[pipeline_statistics_indices[i]];
+ dst++;
+ }
+ }
} else {
uint32_t *dst = (uint32_t*)dest;
dest += util_bitcount(pool->pipeline_stats_mask) * 4;
- for(int i = 0; i < 11; ++i)
- if(pool->pipeline_stats_mask & (1u << i))
- *dst++ = stop[pipeline_statistics_indices[i]] -
- start[pipeline_statistics_indices[i]];
+ for(int i = 0; i < 11; ++i) {
+ if(pool->pipeline_stats_mask & (1u << i)) {
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *dst = stop[pipeline_statistics_indices[i]] -
+ start[pipeline_statistics_indices[i]];
+ dst++;
+ }
+ }
}
break;
}
available = 0;
}
- if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+ if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
result = VK_NOT_READY;
- break;
- }
num_primitives_written = src64[3] - src64[1];
primitive_storage_needed = src64[2] - src64[0];
if (flags & VK_QUERY_RESULT_64_BIT) {
- *(uint64_t *)dest = num_primitives_written;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint64_t *)dest = num_primitives_written;
dest += 8;
- *(uint64_t *)dest = primitive_storage_needed;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint64_t *)dest = primitive_storage_needed;
dest += 8;
} else {
- *(uint32_t *)dest = num_primitives_written;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint32_t *)dest = num_primitives_written;
dest += 4;
- *(uint32_t *)dest = primitive_storage_needed;
+ if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+ *(uint32_t *)dest = primitive_storage_needed;
dest += 4;
}
break;
return result;
}
+static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_query_pool *pool)
+{
+ if (cmd_buffer->pending_reset_query) {
+ if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
+ /* Only need to flush caches if the query pool size is
+ * large enough to be reset using the compute shader
+ * path. Small pools don't need any cache flushes
+ * because we use a CP dma clear.
+ */
+ si_emit_cache_flush(cmd_buffer);
+ }
+ }
+}
+
void radv_CmdCopyQueryPoolResults(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
+ /* From the Vulkan spec 1.1.108:
+ *
+ * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
+ * previous uses of vkCmdResetQueryPool in the same queue, without any
+ * additional synchronization."
+ *
+ * So, we have to flush the caches if the compute shader path was used.
+ */
+ emit_query_flush(cmd_buffer, pool);
+
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
? TIMESTAMP_NOT_READY : 0;
uint32_t flush_bits = 0;
+ /* Make sure to sync all previous work if the given command buffer has
+ * pending active queries. Otherwise the GPU might write query data
+ * after the reset operation.
+ */
+ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
firstQuery * pool->stride,
queryCount * pool->stride, value);
}
}
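+ /* vkResetQueryPoolEXT (VK_EXT_host_query_reset): reset queries from the host by writing through the pool's CPU mapping. */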
+void radv_ResetQueryPoolEXT(
+ VkDevice _device,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount)
+{
+ RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+
+ uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
+ ? TIMESTAMP_NOT_READY : 0;
+ uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
+ uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);
+
+ for(uint32_t *p = data; p != data_end; ++p)
+ *p = value;
+
+ if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
+ memset(pool->ptr + pool->availability_offset + firstQuery * 4,
+ 0, queryCount * 4);
+ }
+}
+
static unsigned event_type_for_stream(unsigned stream)
{
switch (stream) {
}
}
-static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
- struct radv_query_pool *pool)
-{
- if (cmd_buffer->pending_reset_query) {
- if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
- /* Only need to flush caches if the query pool size is
- * large enough to be resetted using the compute shader
- * path. Small pools don't need any cache flushes
- * because we use a CP dma clear.
- */
- si_emit_cache_flush(cmd_buffer);
- }
- }
-}
-
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
uint64_t va,
VkQueryType query_type,
radv_cmd_buffer_uses_mec(cmd_buffer),
V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_VALUE_32BIT,
- avail_va, 0, 1,
+ avail_va, 1,
cmd_buffer->gfx9_eop_bug_va);
break;
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
default:
unreachable("ending unhandled query type");
}
+
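+ /* These bits are OR'ed into flush_bits by CmdResetQueryPool so that
+ * pending query writes are completed before the pool is cleared.
+ */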
+ cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_INV_L2 |
+ RADV_CMD_FLAG_INV_VCACHE;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ }
}
void radv_CmdBeginQueryIndexedEXT(
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
- COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
+ COPY_DATA_DST_SEL(V_370_MEM));
radeon_emit(cs, 0);
radeon_emit(cs, 0);
radeon_emit(cs, query_va);
mec,
V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_TIMESTAMP,
- query_va, 0, 0,
+ query_va, 0,
cmd_buffer->gfx9_eop_bug_va);
break;
}