[mesa.git] / src/amd/vulkan/radv_query.c
index 88d8ccb050cf27bce441bb08e0f8aab0d443a66a..3c40774042dd67b43ae4f07a0fdeabc9bb7893fd 100644
@@ -44,11 +44,6 @@ static unsigned get_max_db(struct radv_device *device)
        unsigned num_db = device->physical_device->rad_info.num_render_backends;
        MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
 
-       if (device->physical_device->rad_info.chip_class == SI)
-               num_db = 8;
-       else
-               num_db = MAX2(8, num_db);
-
        /* Otherwise we need to change the query reset procedure */
        assert(rb_mask == ((1ull << num_db) - 1));
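With the SI special case gone, the part of get_max_db() visible in this hunk presumably reduces to the sketch below, reconstructed from the surviving context lines; the trailing return is outside the hunk and is assumed:

    static unsigned get_max_db(struct radv_device *device)
    {
            unsigned num_db = device->physical_device->rad_info.num_render_backends;
            MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

            /* Otherwise we need to change the query reset procedure */
            assert(rb_mask == ((1ull << num_db) - 1));

            return num_db; /* assumed: the return is not shown in the hunk */
    }

Dropping the old clamp matters because the assertion requires a contiguous rb_mask with exactly num_db bits set, which an artificially inflated num_db would violate on parts with fewer than eight render backends.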
 
@@ -614,12 +609,10 @@ VkResult radv_device_init_meta_query_state(struct radv_device *device)
                                             radv_pipeline_cache_to_handle(&device->meta_state.cache),
                                             1, &pipeline_statistics_vk_pipeline_info, NULL,
                                             &device->meta_state.query.pipeline_statistics_query_pipeline);
-       if (result != VK_SUCCESS)
-               goto fail;
 
-       return VK_SUCCESS;
 fail:
-       radv_device_finish_meta_query_state(device);
+       if (result != VK_SUCCESS)
+               radv_device_finish_meta_query_state(device);
        ralloc_free(occlusion_cs.nir);
        ralloc_free(pipeline_statistics_cs.nir);
        return result;
@@ -999,13 +992,7 @@ void radv_CmdCopyQueryPoolResults(
                                uint64_t avail_va = va + pool->availability_offset + 4 * query;
 
                                /* This waits on the ME. All copies below are done on the ME */
-                               radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
-                               radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
-                               radeon_emit(cs, avail_va);
-                               radeon_emit(cs, avail_va >> 32);
-                               radeon_emit(cs, 1); /* reference value */
-                               radeon_emit(cs, 0xffffffff); /* mask */
-                               radeon_emit(cs, 4); /* poll interval */
+                               si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
                        }
                }
                radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
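For reference, the WAIT_REG_MEM dwords deleted in this hunk map one-to-one onto the helper call that replaces them, so si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff) presumably expands to something like the sketch below. This is inferred from the removed lines only: the parameter names are guesses, the second argument is taken to be the predication bit, and the poll interval stays hard-coded at 4. The real helper presumably lives in si_cmd_buffer.c and may differ in detail.

    /* Sketch of the shared wait-fence helper, reconstructed from the
     * open-coded packet it replaces. */
    void si_emit_wait_fence(struct radeon_winsys_cs *cs,
                            bool predicated,
                            uint64_t va, uint32_t ref, uint32_t mask)
    {
            radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, predicated));
            radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
            radeon_emit(cs, va);
            radeon_emit(cs, va >> 32);
            radeon_emit(cs, ref);  /* reference value */
            radeon_emit(cs, mask); /* mask */
            radeon_emit(cs, 4);    /* poll interval */
    }

The same substitution is repeated in the next hunk for the second WAIT_REG_MEM site.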
@@ -1028,13 +1015,7 @@ void radv_CmdCopyQueryPoolResults(
                                uint64_t avail_va = va + pool->availability_offset + 4 * query;
 
                                /* This waits on the ME. All copies below are done on the ME */
-                               radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
-                               radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
-                               radeon_emit(cs, avail_va);
-                               radeon_emit(cs, avail_va >> 32);
-                               radeon_emit(cs, 1); /* reference value */
-                               radeon_emit(cs, 0xffffffff); /* mask */
-                               radeon_emit(cs, 4); /* poll interval */
+                               si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
                        }
                        if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
                                uint64_t avail_va = va + pool->availability_offset + 4 * query;
@@ -1158,7 +1139,7 @@ void radv_CmdEndQuery(
 
                break;
        case VK_QUERY_TYPE_PIPELINE_STATISTICS:
-               radeon_check_space(cmd_buffer->device->ws, cs, 10);
+               radeon_check_space(cmd_buffer->device->ws, cs, 16);
 
                va += pipelinestat_block_size;
 
@@ -1167,13 +1148,12 @@ void radv_CmdEndQuery(
                radeon_emit(cs, va);
                radeon_emit(cs, va >> 32);
 
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-               radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
-                               EVENT_INDEX(5));
-               radeon_emit(cs, avail_va);
-               radeon_emit(cs, (avail_va >> 32) | EOP_DATA_SEL(1));
-               radeon_emit(cs, 1);
-               radeon_emit(cs, 0);
+               si_cs_emit_write_event_eop(cs,
+                                          false,
+                                          cmd_buffer->device->physical_device->rad_info.chip_class,
+                                          false,
+                                          EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
+                                          1, avail_va, 0, 1);
                break;
        default:
                unreachable("ending unhandled query type");
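The helper call above carries the same information as the deleted EVENT_WRITE_EOP packet. Matching this call site (and the two in radv_CmdWriteTimestamp below) against the removed dwords suggests a prototype along these lines, assuming the usual EOP data-select encoding where 1 writes a 32-bit value and 3 writes the 64-bit GPU timestamp; the parameter names and types are inferred from the call sites, not taken from the header:

    /* Presumed prototype of the shared EOP helper; names inferred from the
     * call sites in this file. */
    void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
                                    bool predicated,
                                    enum chip_class chip_class,
                                    bool is_mec,
                                    unsigned event,      /* e.g. EVENT_TYPE_BOTTOM_OF_PIPE_TS */
                                    unsigned event_flags,
                                    unsigned data_sel,   /* 1 = 32-bit value, 3 = GPU timestamp */
                                    uint64_t va,
                                    uint32_t old_fence,
                                    uint32_t new_fence);

Read this way, the call in radv_CmdEndQuery reproduces the old behaviour: a bottom-of-pipe timestamp event that writes the 32-bit value 1 to avail_va, marking the pipeline-statistics slot as available. The cdw reservation grows from 10 to 16, presumably to leave room for the helper's larger worst-case emission.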
@@ -1196,7 +1176,7 @@ void radv_CmdWriteTimestamp(
 
        cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 14);
+       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28);
 
        switch(pipelineStage) {
        case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
@@ -1218,37 +1198,18 @@ void radv_CmdWriteTimestamp(
                radeon_emit(cs, 1);
                break;
        default:
-               if (mec) {
-                       radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
-                       radeon_emit(cs, 3 << 29);
-                       radeon_emit(cs, query_va);
-                       radeon_emit(cs, query_va >> 32);
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-
-                       radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
-                       radeon_emit(cs, 1 << 29);
-                       radeon_emit(cs, avail_va);
-                       radeon_emit(cs, avail_va >> 32);
-                       radeon_emit(cs, 1);
-                       radeon_emit(cs, 0);
-               } else {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
-                       radeon_emit(cs, query_va);
-                       radeon_emit(cs, (3 << 29) | ((query_va >> 32) & 0xFFFF));
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
-                       radeon_emit(cs, avail_va);
-                       radeon_emit(cs, (1 << 29) | ((avail_va >> 32) & 0xFFFF));
-                       radeon_emit(cs, 1);
-                       radeon_emit(cs, 0);
-               }
+               si_cs_emit_write_event_eop(cs,
+                                          false,
+                                          cmd_buffer->device->physical_device->rad_info.chip_class,
+                                          mec,
+                                          V_028A90_BOTTOM_OF_PIPE_TS, 0,
+                                          3, query_va, 0, 0);
+               si_cs_emit_write_event_eop(cs,
+                                          false,
+                                          cmd_buffer->device->physical_device->rad_info.chip_class,
+                                          mec,
+                                          V_028A90_BOTTOM_OF_PIPE_TS, 0,
+                                          1, avail_va, 0, 1);
                break;
        }
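Taken together, the two calls replace both removed branches: the MEC path that used RELEASE_MEM and the graphics path that used EVENT_WRITE_EOP. data_sel 3 writes the 64-bit GPU timestamp to query_va and data_sel 1 writes the literal value 1 to avail_va, exactly as the deleted code did, and the reservation grows from 14 to 28 dwords, presumably to cover the helper's larger worst case. A minimal sketch of what such a helper has to emit, taken from the two removed branches (the name emit_eop_sketch is hypothetical, and any chip_class-specific handling the real helper adds is ignored here):

    /* Hypothetical sketch: the two code paths the shared helper must cover,
     * reconstructed verbatim from the branches removed above. */
    static void emit_eop_sketch(struct radeon_winsys_cs *cs, bool is_mec,
                                unsigned event, unsigned data_sel,
                                uint64_t va, uint32_t new_fence)
    {
            if (is_mec) {
                    /* Compute rings use RELEASE_MEM. */
                    radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
                    radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(5));
                    radeon_emit(cs, data_sel << 29);
                    radeon_emit(cs, va);
                    radeon_emit(cs, va >> 32);
                    radeon_emit(cs, new_fence);
                    radeon_emit(cs, 0);
            } else {
                    /* Graphics rings use EVENT_WRITE_EOP. */
                    radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                    radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(5));
                    radeon_emit(cs, va);
                    radeon_emit(cs, (data_sel << 29) | ((va >> 32) & 0xFFFF));
                    radeon_emit(cs, new_fence);
                    radeon_emit(cs, 0);
            }
    }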