X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_query.c;h=63a2ab773a83f48a0b32d819f79a4a137d3239c0;hb=fc9248e13e20ed49e9e672b6cb82fa5b05c48e61;hp=276cc1c42d7907ac9721ddf796879506c282e73d;hpb=e8997287695119e61709daa89ded28e393ece533;p=mesa.git diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c index 276cc1c42d7..63a2ab773a8 100644 --- a/src/amd/vulkan/radv_query.c +++ b/src/amd/vulkan/radv_query.c @@ -51,6 +51,12 @@ static unsigned get_max_db(struct radv_device *device) return num_db; } + +static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag) +{ + return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag))); +} + static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count) { nir_ssa_def *counter = nir_load_var(b, var); @@ -132,7 +138,7 @@ build_occlusion_query_shader(struct radv_device *device) { nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter"); nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start"); nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end"); - nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available"); + nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available"); unsigned db_count = get_max_db(device); nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags"); @@ -140,17 +146,19 @@ build_occlusion_query_shader(struct radv_device *device) { nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader, nir_intrinsic_vulkan_resource_index); dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); + dst_buf->num_components = 1; nir_intrinsic_set_desc_set(dst_buf, 0); nir_intrinsic_set_binding(dst_buf, 0); - nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL); + nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL); nir_builder_instr_insert(&b, &dst_buf->instr); nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader, nir_intrinsic_vulkan_resource_index); src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); + src_buf->num_components = 1; nir_intrinsic_set_desc_set(src_buf, 0); nir_intrinsic_set_binding(src_buf, 1); - nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL); + nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL); nir_builder_instr_insert(&b, &src_buf->instr); nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b); @@ -170,7 +178,7 @@ build_occlusion_query_shader(struct radv_device *device) { nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1); nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1); - nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1); + nir_store_var(&b, available, nir_imm_true(&b), 0x1); nir_loop *outer_loop = nir_loop_create(b.shader); nir_builder_cf_insert(&b, &outer_loop->cf_node); @@ -208,18 +216,17 @@ build_occlusion_query_shader(struct radv_device *device) { b.cursor = nir_after_cf_list(&update_if->else_list); - nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1); + nir_store_var(&b, available, nir_imm_false(&b), 0x1); b.cursor = nir_after_cf_node(&outer_loop->cf_node); /* Store the result if complete or if partial results have been requested. 
*/ - nir_ssa_def *result_is_64bit = nir_iand(&b, flags, - nir_imm_int(&b, VK_QUERY_RESULT_64_BIT)); + nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT); nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4)); nir_if *store_if = nir_if_create(b.shader); - store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available))); + store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available))); nir_cf_node_insert(b.cursor, &store_if->cf_node); b.cursor = nir_after_cf_list(&store_if->then_list); @@ -253,13 +260,13 @@ build_occlusion_query_shader(struct radv_device *device) { /* Store the availability bit if requested. */ nir_if *availability_if = nir_if_create(b.shader); - availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))); + availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)); nir_cf_node_insert(b.cursor, &availability_if->cf_node); b.cursor = nir_after_cf_list(&availability_if->then_list); store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo); - store->src[0] = nir_src_for_ssa(nir_load_var(&b, available)); + store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available))); store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa); store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base)); nir_intrinsic_set_write_mask(store, 0x1); @@ -291,11 +298,11 @@ build_pipeline_statistics_query_shader(struct radv_device *device) { * uint64_t dst_offset = dst_base; * uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 
8 : 4;
 * uint32_t elem_count = stats_mask >> 16;
- * uint32_t available = src_buf[avail_offset + 4 * global_id.x];
+ * uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
 * if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
- * dst_buf[dst_offset + elem_count * elem_size] = available;
+ * dst_buf[dst_offset + elem_count * elem_size] = available32;
 * }
- * if (available) {
+ * if ((bool)available32) {
 * // repeat 11 times:
 * if (stats_mask & (1 << 0)) {
 * uint64_t start = src_buf[src_offset + 8 * indices[0]];
@@ -330,17 +337,19 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
 nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
 nir_intrinsic_vulkan_resource_index);
 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ dst_buf->num_components = 1;
 nir_intrinsic_set_desc_set(dst_buf, 0);
 nir_intrinsic_set_binding(dst_buf, 0);
- nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
 nir_builder_instr_insert(&b, &dst_buf->instr);
 nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
 nir_intrinsic_vulkan_resource_index);
 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+ src_buf->num_components = 1;
 nir_intrinsic_set_desc_set(src_buf, 0);
 nir_intrinsic_set_binding(src_buf, 1);
- nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
 nir_builder_instr_insert(&b, &src_buf->instr);
 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
@@ -367,23 +376,22 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
 load->num_components = 1;
 nir_builder_instr_insert(&b, &load->instr);
- nir_ssa_def *available = &load->dest.ssa;
+ nir_ssa_def *available32 = &load->dest.ssa;
- nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
- nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
+ nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
 nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit,
 nir_imm_int(&b, 8),
 nir_imm_int(&b, 4));
 nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
 /* Store the availability bit if requested. 
*/ nir_if *availability_if = nir_if_create(b.shader); - availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))); + availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)); nir_cf_node_insert(b.cursor, &availability_if->cf_node); b.cursor = nir_after_cf_list(&availability_if->then_list); nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo); - store->src[0] = nir_src_for_ssa(available); + store->src[0] = nir_src_for_ssa(available32); store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa); store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size))); nir_intrinsic_set_write_mask(store, 0x1); @@ -393,7 +401,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) { b.cursor = nir_after_cf_node(&availability_if->cf_node); nir_if *available_if = nir_if_create(b.shader); - available_if->condition = nir_src_for_ssa(available); + available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32)); nir_cf_node_insert(b.cursor, &available_if->cf_node); b.cursor = nir_after_cf_list(&available_if->then_list); @@ -401,7 +409,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) { nir_store_var(&b, output_offset, output_base, 0x1); for (int i = 0; i < 11; ++i) { nir_if *store_if = nir_if_create(b.shader); - store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i))); + store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i)); nir_cf_node_insert(b.cursor, &store_if->cf_node); b.cursor = nir_after_cf_list(&store_if->then_list); @@ -463,8 +471,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) { b.cursor = nir_after_cf_list(&available_if->else_list); available_if = nir_if_create(b.shader); - available_if->condition = nir_src_for_ssa(nir_iand(&b, flags, - nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT))); + available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT)); nir_cf_node_insert(b.cursor, &available_if->cf_node); b.cursor = nir_after_cf_list(&available_if->then_list); @@ -563,12 +570,12 @@ build_tfb_query_shader(struct radv_device *device) glsl_vector_type(GLSL_TYPE_UINT64, 2), "result"); nir_variable *available = - nir_local_variable_create(b.impl, glsl_int_type(), "available"); + nir_local_variable_create(b.impl, glsl_bool_type(), "available"); nir_store_var(&b, result, nir_vec2(&b, nir_imm_int64(&b, 0), nir_imm_int64(&b, 0)), 0x3); - nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1); + nir_store_var(&b, available, nir_imm_false(&b), 0x1); nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags"); @@ -576,22 +583,24 @@ build_tfb_query_shader(struct radv_device *device) nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader, nir_intrinsic_vulkan_resource_index); dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); + dst_buf->num_components = 1; nir_intrinsic_set_desc_set(dst_buf, 0); nir_intrinsic_set_binding(dst_buf, 0); - nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL); + nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL); nir_builder_instr_insert(&b, &dst_buf->instr); nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader, nir_intrinsic_vulkan_resource_index); src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); + src_buf->num_components = 1; nir_intrinsic_set_desc_set(src_buf, 0); 
nir_intrinsic_set_binding(src_buf, 1); - nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL); + nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL); nir_builder_instr_insert(&b, &src_buf->instr); /* Compute global ID. */ - nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0); - nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0); + nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b); + nir_ssa_def *wg_id = nir_load_work_group_id(&b); nir_ssa_def *block_size = nir_imm_ivec4(&b, b.shader->info.cs.local_size[0], b.shader->info.cs.local_size[1], @@ -627,8 +636,8 @@ build_tfb_query_shader(struct radv_device *device) avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1), nir_channel(&b, &load2->dest.ssa, 3)); nir_ssa_def *result_is_available = - nir_iand(&b, nir_iand(&b, avails[0], avails[1]), - nir_imm_int(&b, 0x80000000)); + nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]), + nir_imm_int(&b, 0x80000000))); /* Only compute result if available. */ nir_if *available_if = nir_if_create(b.shader); @@ -661,13 +670,13 @@ build_tfb_query_shader(struct radv_device *device) nir_store_var(&b, result, nir_vec2(&b, num_primitive_written, primitive_storage_needed), 0x3); - nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1); + nir_store_var(&b, available, nir_imm_true(&b), 0x1); b.cursor = nir_after_cf_node(&available_if->cf_node); /* Determine if result is 64 or 32 bit. */ nir_ssa_def *result_is_64bit = - nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_64_BIT)); + nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT); nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16), nir_imm_int(&b, 8)); @@ -675,8 +684,7 @@ build_tfb_query_shader(struct radv_device *device) /* Store the result if complete or partial results have been requested. */ nir_if *store_if = nir_if_create(b.shader); store_if->condition = - nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, - nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), + nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available))); nir_cf_node_insert(b.cursor, &store_if->cf_node); @@ -714,14 +722,13 @@ build_tfb_query_shader(struct radv_device *device) /* Store the availability bit if requested. 
*/ nir_if *availability_if = nir_if_create(b.shader); availability_if->condition = - nir_src_for_ssa(nir_iand(&b, flags, - nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))); + nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)); nir_cf_node_insert(b.cursor, &availability_if->cf_node); b.cursor = nir_after_cf_list(&availability_if->then_list); store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo); - store->src[0] = nir_src_for_ssa(nir_load_var(&b, available)); + store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available))); store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa); store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base)); nir_intrinsic_set_write_mask(store, 0x1); @@ -1060,7 +1067,8 @@ VkResult radv_CreateQueryPool( pool->size += 4 * pCreateInfo->queryCount; pool->bo = device->ws->buffer_create(device->ws, pool->size, - 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING); + 64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING, + RADV_BO_PRIORITY_QUERY_POOL); if (!pool->bo) { vk_free2(&device->alloc, pAllocator, pool); @@ -1133,17 +1141,16 @@ VkResult radv_GetQueryPoolResults( available = *(uint64_t *)src != TIMESTAMP_NOT_READY; } - if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) { + if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) result = VK_NOT_READY; - break; - - } if (flags & VK_QUERY_RESULT_64_BIT) { - *(uint64_t*)dest = *(uint64_t*)src; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint64_t*)dest = *(uint64_t*)src; dest += 8; } else { - *(uint32_t*)dest = *(uint32_t*)src; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint32_t*)dest = *(uint32_t*)src; dest += 4; } break; @@ -1168,45 +1175,49 @@ VkResult radv_GetQueryPoolResults( } } - if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) { + if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) result = VK_NOT_READY; - break; - - } if (flags & VK_QUERY_RESULT_64_BIT) { - *(uint64_t*)dest = sample_count; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint64_t*)dest = sample_count; dest += 8; } else { - *(uint32_t*)dest = sample_count; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint32_t*)dest = sample_count; dest += 4; } break; } case VK_QUERY_TYPE_PIPELINE_STATISTICS: { - if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) { + if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) result = VK_NOT_READY; - break; - - } const uint64_t *start = (uint64_t*)src; const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size); if (flags & VK_QUERY_RESULT_64_BIT) { uint64_t *dst = (uint64_t*)dest; dest += util_bitcount(pool->pipeline_stats_mask) * 8; - for(int i = 0; i < 11; ++i) - if(pool->pipeline_stats_mask & (1u << i)) - *dst++ = stop[pipeline_statistics_indices[i]] - - start[pipeline_statistics_indices[i]]; + for(int i = 0; i < 11; ++i) { + if(pool->pipeline_stats_mask & (1u << i)) { + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *dst = stop[pipeline_statistics_indices[i]] - + start[pipeline_statistics_indices[i]]; + dst++; + } + } } else { uint32_t *dst = (uint32_t*)dest; dest += util_bitcount(pool->pipeline_stats_mask) * 4; - for(int i = 0; i < 11; ++i) - if(pool->pipeline_stats_mask & (1u << i)) - *dst++ = stop[pipeline_statistics_indices[i]] - - start[pipeline_statistics_indices[i]]; + for(int i = 0; i < 11; ++i) { + if(pool->pipeline_stats_mask & (1u << i)) { + if (available || (flags & 
VK_QUERY_RESULT_PARTIAL_BIT)) + *dst = stop[pipeline_statistics_indices[i]] - + start[pipeline_statistics_indices[i]]; + dst++; + } + } } break; } @@ -1227,23 +1238,25 @@ VkResult radv_GetQueryPoolResults( available = 0; } - if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) { + if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) result = VK_NOT_READY; - break; - } num_primitives_written = src64[3] - src64[1]; primitive_storage_needed = src64[2] - src64[0]; if (flags & VK_QUERY_RESULT_64_BIT) { - *(uint64_t *)dest = num_primitives_written; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint64_t *)dest = num_primitives_written; dest += 8; - *(uint64_t *)dest = primitive_storage_needed; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint64_t *)dest = primitive_storage_needed; dest += 8; } else { - *(uint32_t *)dest = num_primitives_written; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint32_t *)dest = num_primitives_written; dest += 4; - *(uint32_t *)dest = primitive_storage_needed; + if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) + *(uint32_t *)dest = primitive_storage_needed; dest += 4; } break; @@ -1336,8 +1349,11 @@ void radv_CmdCopyQueryPoolResults( if (flags & VK_QUERY_RESULT_WAIT_BIT) { + /* Wait on the high 32 bits of the timestamp in + * case the low part is 0xffffffff. + */ radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL, - local_src_va, + local_src_va + 4, TIMESTAMP_NOT_READY >> 32, 0xffffffff); } @@ -1425,6 +1441,28 @@ void radv_CmdResetQueryPool( } } +void radv_ResetQueryPoolEXT( + VkDevice _device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount) +{ + RADV_FROM_HANDLE(radv_query_pool, pool, queryPool); + + uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP + ? TIMESTAMP_NOT_READY : 0; + uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride); + uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride); + + for(uint32_t *p = data; p != data_end; ++p) + *p = value; + + if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) { + memset(pool->ptr + pool->availability_offset + firstQuery * 4, + 0, queryCount * 4); + } +} + static unsigned event_type_for_stream(unsigned stream) { switch (stream) { @@ -1565,7 +1603,7 @@ static void emit_end_query(struct radv_cmd_buffer *cmd_buffer, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DATA_SEL_VALUE_32BIT, - avail_va, 0, 1, + avail_va, 1, cmd_buffer->gfx9_eop_bug_va); break; case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: @@ -1688,7 +1726,7 @@ void radv_CmdWriteTimestamp( radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM | COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) | - COPY_DATA_DST_SEL(V_370_MEM_ASYNC)); + COPY_DATA_DST_SEL(V_370_MEM)); radeon_emit(cs, 0); radeon_emit(cs, 0); radeon_emit(cs, query_va); @@ -1700,7 +1738,7 @@ void radv_CmdWriteTimestamp( mec, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DATA_SEL_TIMESTAMP, - query_va, 0, 0, + query_va, 0, cmd_buffer->gfx9_eop_bug_va); break; }
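
A note on the radv_GetQueryPoolResults changes above: every query type now follows the same copy pattern. Instead of an early break that truncated the output at the first unavailable query, the loop records VK_NOT_READY, writes the value only when the query is available or VK_QUERY_RESULT_PARTIAL_BIT was requested, and always advances dest by the element size so the per-query stride is preserved. Below is a minimal standalone sketch of that pattern; it is not RADV code, and the helper name copy_query_value and the flag macros are illustrative stand-ins for the Vulkan enums:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Stand-ins for VK_QUERY_RESULT_64_BIT and VK_QUERY_RESULT_PARTIAL_BIT. */
#define RESULT_64_BIT  (1u << 0)
#define RESULT_PARTIAL (1u << 1)

/* Copy one query value into the caller's buffer: write only when the
 * value may be reported, but always advance the destination pointer so
 * later queries land at the correct offset. */
static char *copy_query_value(char *dest, uint64_t value,
                              bool available, uint32_t flags)
{
	bool write = available || (flags & RESULT_PARTIAL);

	if (flags & RESULT_64_BIT) {
		if (write)
			memcpy(dest, &value, sizeof(uint64_t));
		dest += 8;
	} else {
		uint32_t value32 = (uint32_t)value;
		if (write)
			memcpy(dest, &value32, sizeof(uint32_t));
		dest += 4;
	}
	return dest;
}

With this shape, a single unavailable query leaves a gap in the output rather than cutting off every query after it, which is what the removed break statements used to do.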