tree-wide: replace MAYBE_UNUSED with ASSERTED
[mesa.git] / src / amd / vulkan / radv_query.c
index 9993b9acd6d2e9bfdeafde725ac5b59d5dc0daa2..1da8100b3a165c344859337322194502b28a08c2 100644 (file)
 static const int pipelinestat_block_size = 11 * 8;
 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
 
-static unsigned get_max_db(struct radv_device *device)
+static nir_ssa_def *nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
 {
-       unsigned num_db = device->physical_device->rad_info.num_render_backends;
-       MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
-
-       /* Otherwise we need to change the query reset procedure */
-       assert(rb_mask == ((1ull << num_db) - 1));
-
-       return num_db;
+       return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
 }
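
The new nir_test_flag() helper centralizes the flag-test idiom used throughout this file: an iand against an immediate followed by i2b, so nir_if conditions receive a proper 1-bit boolean instead of a 32-bit integer. A minimal host-side sketch of the same semantics (helper name hypothetical, not part of the source):

#include <stdbool.h>
#include <stdint.h>

static bool test_flag(uint32_t flags, uint32_t flag)
{
        /* True iff any bit of `flag` is set in `flags`; this is exactly
         * what the nir_iand + nir_i2b pair above computes in SSA form. */
        return (flags & flag) != 0;
}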
 
 static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
@@ -102,12 +96,14 @@ build_occlusion_query_shader(struct radv_device *device) {
         *      uint64_t dst_offset = dst_stride * global_id.x;
         *      bool available = true;
         *      for (int i = 0; i < db_count; ++i) {
-        *              uint64_t start = src_buf[src_offset + 16 * i];
-        *              uint64_t end = src_buf[src_offset + 16 * i + 8];
-        *              if ((start & (1ull << 63)) && (end & (1ull << 63)))
-        *                      result += end - start;
-        *              else
-        *                      available = false;
+        *              if (enabled_rb_mask & (1 << i)) {
+        *                      uint64_t start = src_buf[src_offset + 16 * i];
+        *                      uint64_t end = src_buf[src_offset + 16 * i + 8];
+        *                      if ((start & (1ull << 63)) && (end & (1ull << 63)))
+        *                              result += end - start;
+        *                      else
+        *                              available = false;
+        *              }
         *      }
         *      uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
         *      if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
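
For context on the (1ull << 63) tests in the pseudocode above: the DB writes bit 63 of each Z-pass counter once the value is valid, and a counter pair only contributes to the result when both halves are written. A standalone sketch of that check (hypothetical helper, assuming the same buffer layout):

#include <stdbool.h>
#include <stdint.h>

static bool zpass_pair_ready(uint64_t start, uint64_t end)
{
        /* Bit 63 of each 64-bit counter is set by the hardware once the
         * value has been written; only then is end - start a usable
         * sample count. */
        return (start & (1ull << 63)) && (end & (1ull << 63));
}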
@@ -132,29 +128,32 @@ build_occlusion_query_shader(struct radv_device *device) {
        nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
        nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
        nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
-       nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
-       unsigned db_count = get_max_db(device);
+       nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
+       unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
+       unsigned db_count = device->physical_device->rad_info.num_render_backends;
 
        nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
 
        nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       dst_buf->num_components = 1;
        nir_intrinsic_set_desc_set(dst_buf, 0);
        nir_intrinsic_set_binding(dst_buf, 0);
-       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &dst_buf->instr);
 
        nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       src_buf->num_components = 1;
        nir_intrinsic_set_desc_set(src_buf, 0);
        nir_intrinsic_set_binding(src_buf, 1);
-       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &src_buf->instr);
 
-       nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
-       nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
+       nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
+       nir_ssa_def *wg_id = nir_load_work_group_id(&b);
        nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                                b.shader->info.cs.local_size[0],
                                                b.shader->info.cs.local_size[1],
@@ -170,7 +169,7 @@ build_occlusion_query_shader(struct radv_device *device) {
 
        nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
        nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
-       nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);
+       nir_store_var(&b, available, nir_imm_true(&b), 0x1);
 
        nir_loop *outer_loop = nir_loop_create(b.shader);
        nir_builder_cf_insert(&b, &outer_loop->cf_node);
@@ -179,6 +178,16 @@ build_occlusion_query_shader(struct radv_device *device) {
        nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
        radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
 
+       nir_ssa_def *enabled_cond =
+               nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
+                            nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));
+
+       nir_if *enabled_if = nir_if_create(b.shader);
+       enabled_if->condition = nir_src_for_ssa(nir_i2b(&b, enabled_cond));
+       nir_cf_node_insert(b.cursor, &enabled_if->cf_node);
+
+       b.cursor = nir_after_cf_list(&enabled_if->then_list);
+
        nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
        load_offset = nir_iadd(&b, input_base, load_offset);
 
@@ -208,18 +217,17 @@ build_occlusion_query_shader(struct radv_device *device) {
 
        b.cursor = nir_after_cf_list(&update_if->else_list);
 
-       nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);
+       nir_store_var(&b, available, nir_imm_false(&b), 0x1);
 
        b.cursor = nir_after_cf_node(&outer_loop->cf_node);
 
        /* Store the result if complete or if partial results have been requested. */
 
-       nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
-                                               nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
+       nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
        nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
 
        nir_if *store_if = nir_if_create(b.shader);
-       store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
+       store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
        nir_cf_node_insert(b.cursor, &store_if->cf_node);
 
        b.cursor = nir_after_cf_list(&store_if->then_list);
@@ -253,13 +261,13 @@ build_occlusion_query_shader(struct radv_device *device) {
        /* Store the availability bit if requested. */
 
        nir_if *availability_if = nir_if_create(b.shader);
-       availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
+       availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
        nir_cf_node_insert(b.cursor, &availability_if->cf_node);
 
        b.cursor = nir_after_cf_list(&availability_if->then_list);
 
        store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
-       store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
+       store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
        store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
        store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
        nir_intrinsic_set_write_mask(store, 0x1);
@@ -291,11 +299,11 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
         *      uint64_t dst_offset = dst_base;
         *      uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
         *      uint32_t elem_count = stats_mask >> 16;
-        *      uint32_t available = src_buf[avail_offset + 4 * global_id.x];
+        *      uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
         *      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
-        *              dst_buf[dst_offset + elem_count * elem_size] = available;
+        *              dst_buf[dst_offset + elem_count * elem_size] = available32;
         *      }
-        *      if (available) {
+        *      if ((bool)available32) {
         *              // repeat 11 times:
         *              if (stats_mask & (1 << 0)) {
         *                      uint64_t start = src_buf[src_offset + 8 * indices[0]];
@@ -330,21 +338,23 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
        nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       dst_buf->num_components = 1;
        nir_intrinsic_set_desc_set(dst_buf, 0);
        nir_intrinsic_set_binding(dst_buf, 0);
-       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &dst_buf->instr);
 
        nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       src_buf->num_components = 1;
        nir_intrinsic_set_desc_set(src_buf, 0);
        nir_intrinsic_set_binding(src_buf, 1);
-       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &src_buf->instr);
 
-       nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
-       nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
+       nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
+       nir_ssa_def *wg_id = nir_load_work_group_id(&b);
        nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                                b.shader->info.cs.local_size[0],
                                                b.shader->info.cs.local_size[1],
@@ -367,23 +377,22 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
        nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
        load->num_components = 1;
        nir_builder_instr_insert(&b, &load->instr);
-       nir_ssa_def *available = &load->dest.ssa;
+       nir_ssa_def *available32 = &load->dest.ssa;
 
-       nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
-                                               nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
+       nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
        nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
        nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
 
        /* Store the availability bit if requested. */
 
        nir_if *availability_if = nir_if_create(b.shader);
-       availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
+       availability_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
        nir_cf_node_insert(b.cursor, &availability_if->cf_node);
 
        b.cursor = nir_after_cf_list(&availability_if->then_list);
 
        nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
-       store->src[0] = nir_src_for_ssa(available);
+       store->src[0] = nir_src_for_ssa(available32);
        store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
        store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
        nir_intrinsic_set_write_mask(store, 0x1);
@@ -393,7 +402,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
        b.cursor = nir_after_cf_node(&availability_if->cf_node);
 
        nir_if *available_if = nir_if_create(b.shader);
-       available_if->condition = nir_src_for_ssa(available);
+       available_if->condition = nir_src_for_ssa(nir_i2b(&b, available32));
        nir_cf_node_insert(b.cursor, &available_if->cf_node);
 
        b.cursor = nir_after_cf_list(&available_if->then_list);
@@ -401,7 +410,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
        nir_store_var(&b, output_offset, output_base, 0x1);
        for (int i = 0; i < 11; ++i) {
                nir_if *store_if = nir_if_create(b.shader);
-               store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
+               store_if->condition = nir_src_for_ssa(nir_test_flag(&b, stats_mask, 1u << i));
                nir_cf_node_insert(b.cursor, &store_if->cf_node);
 
                b.cursor = nir_after_cf_list(&store_if->then_list);
@@ -463,8 +472,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device) {
        b.cursor = nir_after_cf_list(&available_if->else_list);
 
        available_if = nir_if_create(b.shader);
-       available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
-                                                              nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
+       available_if->condition = nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
        nir_cf_node_insert(b.cursor, &available_if->cf_node);
 
        b.cursor = nir_after_cf_list(&available_if->then_list);
@@ -563,12 +571,12 @@ build_tfb_query_shader(struct radv_device *device)
                                          glsl_vector_type(GLSL_TYPE_UINT64, 2),
                                          "result");
        nir_variable *available =
-               nir_local_variable_create(b.impl, glsl_int_type(), "available");
+               nir_local_variable_create(b.impl, glsl_bool_type(), "available");
 
        nir_store_var(&b, result,
                      nir_vec2(&b, nir_imm_int64(&b, 0),
                                   nir_imm_int64(&b, 0)), 0x3);
-       nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);
+       nir_store_var(&b, available, nir_imm_false(&b), 0x1);
 
        nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
 
@@ -576,22 +584,24 @@ build_tfb_query_shader(struct radv_device *device)
        nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       dst_buf->num_components = 1;
        nir_intrinsic_set_desc_set(dst_buf, 0);
        nir_intrinsic_set_binding(dst_buf, 0);
-       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, dst_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &dst_buf->instr);
 
        nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
                                                                  nir_intrinsic_vulkan_resource_index);
        src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+       src_buf->num_components = 1;
        nir_intrinsic_set_desc_set(src_buf, 0);
        nir_intrinsic_set_binding(src_buf, 1);
-       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
+       nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, src_buf->num_components, 32, NULL);
        nir_builder_instr_insert(&b, &src_buf->instr);
 
        /* Compute global ID. */
-       nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
-       nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
+       nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
+       nir_ssa_def *wg_id = nir_load_work_group_id(&b);
        nir_ssa_def *block_size = nir_imm_ivec4(&b,
                                                b.shader->info.cs.local_size[0],
                                                b.shader->info.cs.local_size[1],
@@ -627,8 +637,8 @@ build_tfb_query_shader(struct radv_device *device)
        avails[1] = nir_iand(&b, nir_channel(&b, &load2->dest.ssa, 1),
                                 nir_channel(&b, &load2->dest.ssa, 3));
        nir_ssa_def *result_is_available =
-               nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
-                            nir_imm_int(&b, 0x80000000));
+               nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]),
+                                        nir_imm_int(&b, 0x80000000)));
 
        /* Only compute result if available. */
        nir_if *available_if = nir_if_create(b.shader);
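
The avails[] arithmetic above condenses to: take the high dword of each of the four streamout counters (begin/end snapshots of primitives-written and primitives-needed) and require bit 31, the hardware's written flag, in all of them. A hedged standalone version of that test:

#include <stdbool.h>
#include <stdint.h>

static bool tfb_counters_ready(const uint32_t hi_dwords[4])
{
        /* Each counter's high dword gets bit 31 set once the GPU has
         * written it; all four must be ready. */
        return (hi_dwords[0] & hi_dwords[1] &
                hi_dwords[2] & hi_dwords[3] & 0x80000000u) != 0;
}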
@@ -661,13 +671,13 @@ build_tfb_query_shader(struct radv_device *device)
        nir_store_var(&b, result,
                      nir_vec2(&b, num_primitive_written,
                                   primitive_storage_needed), 0x3);
-       nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);
+       nir_store_var(&b, available, nir_imm_true(&b), 0x1);
 
        b.cursor = nir_after_cf_node(&available_if->cf_node);
 
        /* Determine if result is 64 or 32 bit. */
        nir_ssa_def *result_is_64bit =
-               nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
+               nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
        nir_ssa_def *result_size =
                nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16),
                          nir_imm_int(&b, 8));
@@ -675,8 +685,7 @@ build_tfb_query_shader(struct radv_device *device)
        /* Store the result if complete or partial results have been requested. */
        nir_if *store_if = nir_if_create(b.shader);
        store_if->condition =
-               nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags,
-                                                    nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)),
+               nir_src_for_ssa(nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                                        nir_load_var(&b, available)));
        nir_cf_node_insert(b.cursor, &store_if->cf_node);
 
@@ -714,14 +723,13 @@ build_tfb_query_shader(struct radv_device *device)
        /* Store the availability bit if requested. */
        nir_if *availability_if = nir_if_create(b.shader);
        availability_if->condition =
-               nir_src_for_ssa(nir_iand(&b, flags,
-                                        nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
+               nir_src_for_ssa(nir_test_flag(&b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
        nir_cf_node_insert(b.cursor, &availability_if->cf_node);
 
        b.cursor = nir_after_cf_list(&availability_if->then_list);
 
        store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
-       store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
+       store->src[0] = nir_src_for_ssa(nir_b2i32(&b, nir_load_var(&b, available)));
        store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
        store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
        nir_intrinsic_set_write_mask(store, 0x1);
@@ -1004,8 +1012,8 @@ static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
                                      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
                                      &push_constants);
 
-       cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
-                                       RADV_CMD_FLAG_INV_VMEM_L1;
+       cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 |
+                                       RADV_CMD_FLAG_INV_VCACHE;
 
        if (flags & VK_QUERY_RESULT_WAIT_BIT)
                cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
@@ -1028,8 +1036,6 @@ VkResult radv_CreateQueryPool(
        struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
                                               sizeof(*pool), 8,
                                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-       uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
-                                ? TIMESTAMP_NOT_READY : 0;
 
        if (!pool)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -1037,7 +1043,7 @@ VkResult radv_CreateQueryPool(
 
        switch(pCreateInfo->queryType) {
        case VK_QUERY_TYPE_OCCLUSION:
-               pool->stride = 16 * get_max_db(device);
+               pool->stride = 16 * device->physical_device->rad_info.num_render_backends;
                break;
        case VK_QUERY_TYPE_PIPELINE_STATISTICS:
                pool->stride = pipelinestat_block_size * 2;
@@ -1060,7 +1066,8 @@ VkResult radv_CreateQueryPool(
                pool->size += 4 * pCreateInfo->queryCount;
 
        pool->bo = device->ws->buffer_create(device->ws, pool->size,
-                                            64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);
+                                            64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING,
+                                            RADV_BO_PRIORITY_QUERY_POOL);
 
        if (!pool->bo) {
                vk_free2(&device->alloc, pAllocator, pool);
@@ -1074,7 +1081,6 @@ VkResult radv_CreateQueryPool(
                vk_free2(&device->alloc, pAllocator, pool);
                return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
        }
-       memset(pool->ptr, initial_value, pool->size);
 
        *pQueryPool = radv_query_pool_to_handle(pool);
        return VK_SUCCESS;
@@ -1116,13 +1122,6 @@ VkResult radv_GetQueryPoolResults(
                char *src = pool->ptr + query * pool->stride;
                uint32_t available;
 
-               if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
-                       if (flags & VK_QUERY_RESULT_WAIT_BIT)
-                               while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
-                                       ;
-                       available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
-               }
-
                switch (pool->type) {
                case VK_QUERY_TYPE_TIMESTAMP: {
                        available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
@@ -1130,32 +1129,36 @@ VkResult radv_GetQueryPoolResults(
                        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
                                while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
                                        ;
-                               available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
+                               available = true;
                        }
 
-                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                                result = VK_NOT_READY;
-                               break;
-
-                       }
 
                        if (flags & VK_QUERY_RESULT_64_BIT) {
-                               *(uint64_t*)dest = *(uint64_t*)src;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint64_t*)dest = *(uint64_t*)src;
                                dest += 8;
                        } else {
-                               *(uint32_t*)dest = *(uint32_t*)src;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint32_t*)dest = *(uint32_t*)src;
                                dest += 4;
                        }
                        break;
                }
                case VK_QUERY_TYPE_OCCLUSION: {
                        volatile uint64_t const *src64 = (volatile uint64_t const *)src;
+                       uint32_t db_count = device->physical_device->rad_info.num_render_backends;
+                       uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
                        uint64_t sample_count = 0;
-                       int db_count = get_max_db(device);
                        available = 1;
 
                        for (int i = 0; i < db_count; ++i) {
                                uint64_t start, end;
+
+                               if (!(enabled_rb_mask & (1 << i)))
+                                       continue;
+
                                do {
                                        start = src64[2 * i];
                                        end = src64[2 * i + 1];
@@ -1168,45 +1171,54 @@ VkResult radv_GetQueryPoolResults(
                                }
                        }
 
-                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                                result = VK_NOT_READY;
-                               break;
-
-                       }
 
                        if (flags & VK_QUERY_RESULT_64_BIT) {
-                               *(uint64_t*)dest = sample_count;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint64_t*)dest = sample_count;
                                dest += 8;
                        } else {
-                               *(uint32_t*)dest = sample_count;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint32_t*)dest = sample_count;
                                dest += 4;
                        }
                        break;
                }
                case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
-                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
-                               result = VK_NOT_READY;
-                               break;
+                       if (flags & VK_QUERY_RESULT_WAIT_BIT)
+                               while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
+                                       ;
+                       available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
 
-                       }
+                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                               result = VK_NOT_READY;
 
                        const uint64_t *start = (uint64_t*)src;
                        const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
                        if (flags & VK_QUERY_RESULT_64_BIT) {
                                uint64_t *dst = (uint64_t*)dest;
                                dest += util_bitcount(pool->pipeline_stats_mask) * 8;
-                               for(int i = 0; i < 11; ++i)
-                                       if(pool->pipeline_stats_mask & (1u << i))
-                                               *dst++ = stop[pipeline_statistics_indices[i]] -
-                                                        start[pipeline_statistics_indices[i]];
+                               for(int i = 0; i < 11; ++i) {
+                                       if(pool->pipeline_stats_mask & (1u << i)) {
+                                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                                       *dst = stop[pipeline_statistics_indices[i]] -
+                                                              start[pipeline_statistics_indices[i]];
+                                               dst++;
+                                       }
+                               }
 
                        } else {
                                uint32_t *dst = (uint32_t*)dest;
                                dest += util_bitcount(pool->pipeline_stats_mask) * 4;
-                               for(int i = 0; i < 11; ++i)
-                                       if(pool->pipeline_stats_mask & (1u << i))
-                                               *dst++ = stop[pipeline_statistics_indices[i]] -
-                                                        start[pipeline_statistics_indices[i]];
+                               for(int i = 0; i < 11; ++i) {
+                                       if(pool->pipeline_stats_mask & (1u << i)) {
+                                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                                       *dst = stop[pipeline_statistics_indices[i]] -
+                                                              start[pipeline_statistics_indices[i]];
+                                               dst++;
+                                       }
+                               }
                        }
                        break;
                }
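
As a worked illustration of the layout both branches above assume: each query slot holds two blocks of pipelinestat_block_size bytes (11 64-bit counters, begin snapshot then end snapshot), and pipeline_statistics_indices[] maps Vulkan's statistic bit order onto the hardware counter order. A hypothetical accessor built on the file's statics:

#include <stdint.h>

static uint64_t pipeline_stat_delta(const char *slot, int vk_stat_bit)
{
        /* Begin block first, end block pipelinestat_block_size bytes in. */
        const uint64_t *begin = (const uint64_t *)slot;
        const uint64_t *end =
                (const uint64_t *)(slot + pipelinestat_block_size);
        unsigned hw = pipeline_statistics_indices[vk_stat_bit];

        return end[hw] - begin[hw];
}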
@@ -1227,23 +1239,25 @@ VkResult radv_GetQueryPoolResults(
                                        available = 0;
                        }
 
-                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
+                       if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
                                result = VK_NOT_READY;
-                               break;
-                       }
 
                        num_primitives_written = src64[3] - src64[1];
                        primitive_storage_needed = src64[2] - src64[0];
 
                        if (flags & VK_QUERY_RESULT_64_BIT) {
-                               *(uint64_t *)dest = num_primitives_written;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint64_t *)dest = num_primitives_written;
                                dest += 8;
-                               *(uint64_t *)dest = primitive_storage_needed;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint64_t *)dest = primitive_storage_needed;
                                dest += 8;
                        } else {
-                               *(uint32_t *)dest = num_primitives_written;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint32_t *)dest = num_primitives_written;
                                dest += 4;
-                               *(uint32_t *)dest = primitive_storage_needed;
+                               if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
+                                       *(uint32_t *)dest = primitive_storage_needed;
                                dest += 4;
                        }
                        break;
@@ -1264,6 +1278,21 @@ VkResult radv_GetQueryPoolResults(
        return result;
 }
 
+static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
+                            struct radv_query_pool *pool)
+{
+       if (cmd_buffer->pending_reset_query) {
+               if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
+                       /* Only need to flush caches if the query pool size is
+                        * large enough to be reset using the compute shader
+                        * path. Small pools don't need any cache flushes
+                        * because we use a CP DMA clear.
+                        */
+                       si_emit_cache_flush(cmd_buffer);
+               }
+       }
+}
+
 void radv_CmdCopyQueryPoolResults(
     VkCommandBuffer                             commandBuffer,
     VkQueryPool                                 queryPool,
@@ -1286,6 +1315,16 @@ void radv_CmdCopyQueryPoolResults(
        radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
        radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
 
+       /* From the Vulkan spec 1.1.108:
+        *
+        * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
+        *  previous uses of vkCmdResetQueryPool in the same queue, without any
+        *  additional synchronization."
+        *
+        * So, we have to flush the caches if the compute shader path was used.
+        */
+       emit_query_flush(cmd_buffer, pool);
+
        switch (pool->type) {
        case VK_QUERY_TYPE_OCCLUSION:
                if (flags & VK_QUERY_RESULT_WAIT_BIT) {
@@ -1332,12 +1371,15 @@ void radv_CmdCopyQueryPoolResults(
                        unsigned query = firstQuery + i;
                        uint64_t local_src_va = va + query * pool->stride;
 
-                       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
+                       ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
 
 
                        if (flags & VK_QUERY_RESULT_WAIT_BIT) {
+                               /* Wait on the high 32 bits of the timestamp in
+                                * case the low part is 0xffffffff.
+                                */
                                radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL,
-                                                local_src_va,
+                                                local_src_va + 4,
                                                 TIMESTAMP_NOT_READY >> 32,
                                                 0xffffffff);
                        }
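
TIMESTAMP_NOT_READY is the all-ones pattern, so TIMESTAMP_NOT_READY >> 32 is 0xffffffff; waiting on the low dword instead could spin forever when a valid timestamp happens to carry 0xffffffff in its low half. A CPU-side analogue of the WAIT_REG_MEM condition (illustrative only, reusing the file's macro):

#include <stdint.h>

static void wait_timestamp_cpu(const volatile uint32_t *ts_hi)
{
        /* Same NOT_EQUAL condition as the packet above: a live GPU
         * timestamp's high dword only reaches 0xffffffff after years
         * of uptime, so this cannot false-positive in practice. */
        while (*ts_hi == (uint32_t)(TIMESTAMP_NOT_READY >> 32))
                ;
}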
@@ -1408,6 +1450,12 @@ void radv_CmdResetQueryPool(
                         ? TIMESTAMP_NOT_READY : 0;
        uint32_t flush_bits = 0;
 
+       /* Make sure to sync all previous work if the given command buffer has
+        * pending active queries. Otherwise the GPU might write query data
+        * after the reset operation.
+        */
+       cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
        flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
                                       firstQuery * pool->stride,
                                       queryCount * pool->stride, value);
@@ -1425,6 +1473,28 @@ void radv_CmdResetQueryPool(
        }
 }
 
+void radv_ResetQueryPoolEXT(
+       VkDevice                                   _device,
+       VkQueryPool                                 queryPool,
+       uint32_t                                    firstQuery,
+       uint32_t                                    queryCount)
+{
+       RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+
+       uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
+                        ? TIMESTAMP_NOT_READY : 0;
+       uint32_t *data = (uint32_t*)(pool->ptr + firstQuery * pool->stride);
+       uint32_t *data_end = (uint32_t*)(pool->ptr + (firstQuery + queryCount) * pool->stride);
+
+       for(uint32_t *p = data; p != data_end; ++p)
+               *p = value;
+
+       if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
+               memset(pool->ptr + pool->availability_offset + firstQuery * 4,
+                      0, queryCount * 4);
+       }
+}
+
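
radv_ResetQueryPoolEXT implements VK_EXT_host_query_reset, which lets applications reset queries from the host without recording a command buffer. Sketch of application-side use (entry point name per the extension spec; variables illustrative):

/* Device must have been created with the hostQueryReset feature enabled. */
vkResetQueryPoolEXT(device, query_pool,
                    0 /* firstQuery */, query_count /* queryCount */);
/* The reset queries are now unavailable and may be begun again. */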
 static unsigned event_type_for_stream(unsigned stream)
 {
        switch (stream) {
@@ -1549,8 +1619,9 @@ static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
                                           cmd_buffer->device->physical_device->rad_info.chip_class,
                                           radv_cmd_buffer_uses_mec(cmd_buffer),
                                           V_028A90_BOTTOM_OF_PIPE_TS, 0,
+                                          EOP_DST_SEL_MEM,
                                           EOP_DATA_SEL_VALUE_32BIT,
-                                          avail_va, 0, 1,
+                                          avail_va, 1,
                                           cmd_buffer->gfx9_eop_bug_va);
                break;
        case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
@@ -1566,6 +1637,15 @@ static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
        default:
                unreachable("ending unhandled query type");
        }
+
+       cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+                                              RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+                                              RADV_CMD_FLAG_INV_L2 |
+                                              RADV_CMD_FLAG_INV_VCACHE;
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+               cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+                                                      RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+       }
 }
 
 void radv_CmdBeginQueryIndexedEXT(
@@ -1582,17 +1662,7 @@ void radv_CmdBeginQueryIndexedEXT(
 
        radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
 
-       if (cmd_buffer->pending_reset_query) {
-               if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
-                       /* Only need to flush caches if the query pool size is
-                        * large enough to be resetted using the compute shader
-                        * path. Small pools don't need any cache flushes
-                        * because we use a CP dma clear.
-                        */
-                       si_emit_cache_flush(cmd_buffer);
-                       cmd_buffer->pending_reset_query = false;
-               }
-       }
+       emit_query_flush(cmd_buffer, pool);
 
        va += pool->stride * query;
 
@@ -1669,11 +1739,13 @@ void radv_CmdWriteTimestamp(
 
        radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);
 
+       emit_query_flush(cmd_buffer, pool);
+
        int num_queries = 1;
        if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
                num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
+       ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
 
        for (unsigned i = 0; i < num_queries; i++) {
                switch(pipelineStage) {
@@ -1681,7 +1753,7 @@ void radv_CmdWriteTimestamp(
                        radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
                        radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
                                    COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
-                                   COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
+                                   COPY_DATA_DST_SEL(V_370_MEM));
                        radeon_emit(cs, 0);
                        radeon_emit(cs, 0);
                        radeon_emit(cs, query_va);
@@ -1692,8 +1764,9 @@ void radv_CmdWriteTimestamp(
                                                   cmd_buffer->device->physical_device->rad_info.chip_class,
                                                   mec,
                                                   V_028A90_BOTTOM_OF_PIPE_TS, 0,
+                                                  EOP_DST_SEL_MEM,
                                                   EOP_DATA_SEL_TIMESTAMP,
-                                                  query_va, 0, 0,
+                                                  query_va, 0,
                                                   cmd_buffer->gfx9_eop_bug_va);
                        break;
                }