tree-wide: replace MAYBE_UNUSED with ASSERTED
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 72593f19ba5b3c49964e10f5ba1aba19bf28007c..a833fc4221b8afcb404e326885f8bd5aae8118ed 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -884,6 +884,47 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
        cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
+static void
+radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
+                         struct radv_pipeline *pipeline)
+{
+       const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
+
+       if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
+               return;
+
+       if (old_pipeline &&
+           old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
+           old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
+               return;
+
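+       /* On Vega12, Vega20, Raven2 and GFX10+, the hw needs a flush when
+        * the binning mode changes. */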
+       bool binning_flush = false;
+       if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
+           cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
+           cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
+           cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+               binning_flush = !old_pipeline ||
+                       G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
+                       G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
+       }
+
+       radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
+                              pipeline->graphics.binning.pa_sc_binner_cntl_0 |
+                              S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
+
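+       /* DB_DFSM_CONTROL moved to a different register offset on GFX10. */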
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+               radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
+                                      pipeline->graphics.binning.db_dfsm_control);
+       } else {
+               radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
+                                      pipeline->graphics.binning.db_dfsm_control);
+       }
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
+
 static void
 radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_shader_variant *shader)
@@ -1097,6 +1138,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
                return;
 
        radv_update_multisample_state(cmd_buffer, pipeline);
+       radv_update_binning_state(cmd_buffer, pipeline);
 
        cmd_buffer->scratch_size_needed =
                                  MAX2(cmd_buffer->scratch_size_needed,
@@ -1896,7 +1938,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
                struct radv_attachment_info *att = &framebuffer->attachments[idx];
                struct radv_image *image = att->attachment->image;
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
-               MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+               ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
                                                                                cmd_buffer->queue_family_index,
                                                                                cmd_buffer->queue_family_index);
                /* We currently don't support writing decompressed HTILE */
@@ -2172,7 +2214,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
        if (flush_indirect_descriptors)
                radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+       ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                           cmd_buffer->cs,
                                                           MAX_SETS * MESA_SHADER_STAGES * 4);
 
@@ -2258,7 +2300,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
                va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
                va += offset;
 
-               MAYBE_UNUSED unsigned cdw_max =
+               ASSERTED unsigned cdw_max =
                        radeon_check_space(cmd_buffer->device->ws,
                                           cmd_buffer->cs, MESA_SHADER_STAGES * 4);
 
@@ -2419,8 +2461,16 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
                        desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                                  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                                  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
-                                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
-                                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+                                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
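+                       /* GFX10 buffer descriptors use a unified format field
+                        * plus OOB_SELECT/RESOURCE_LEVEL. */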
+                       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+                               desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+                                          S_008F0C_OOB_SELECT(3) |
+                                          S_008F0C_RESOURCE_LEVEL(1);
+                       } else {
+                               desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+                       }
                }
 
                va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
@@ -2492,6 +2541,22 @@ struct radv_draw_info {
        uint64_t strmout_buffer_offset;
 };
 
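+/* Primitive restart uses the all-ones value for the bound index type. */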
+static uint32_t
+radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
+{
+       switch (cmd_buffer->state.index_type) {
+       case V_028A7C_VGT_INDEX_8:
+               return 0xffu;
+       case V_028A7C_VGT_INDEX_16:
+               return 0xffffu;
+       case V_028A7C_VGT_INDEX_32:
+               return 0xffffffffu;
+       default:
+               unreachable("invalid index type");
+       }
+}
+
 static void
 si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                           bool instanced_draw, bool indirect_draw,
@@ -2563,7 +2627,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
 
        if (primitive_reset_en) {
                uint32_t primitive_reset_index =
-                       state->index_type ? 0xffffffffu : 0xffffu;
+                       radv_get_primitive_reset_index(cmd_buffer);
 
                if (primitive_reset_index != state->last_primitive_reset_index) {
                        radeon_set_context_reg(cs,
@@ -3184,6 +3248,38 @@ void radv_CmdBindVertexBuffers(
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
 }
 
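+/* With VK_EXT_index_type_uint8, Vulkan index types no longer map 1:1 to
+ * the hw encoding. */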
+static uint32_t
+vk_to_index_type(VkIndexType type)
+{
+       switch (type) {
+       case VK_INDEX_TYPE_UINT8_EXT:
+               return V_028A7C_VGT_INDEX_8;
+       case VK_INDEX_TYPE_UINT16:
+               return V_028A7C_VGT_INDEX_16;
+       case VK_INDEX_TYPE_UINT32:
+               return V_028A7C_VGT_INDEX_32;
+       default:
+               unreachable("invalid index type");
+       }
+}
+
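+/* Index size in bytes for a hw VGT index type. */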
+static uint32_t
+radv_get_vgt_index_size(uint32_t type)
+{
+       switch (type) {
+       case V_028A7C_VGT_INDEX_8:
+               return 1;
+       case V_028A7C_VGT_INDEX_16:
+               return 2;
+       case V_028A7C_VGT_INDEX_32:
+               return 4;
+       default:
+               unreachable("invalid index type");
+       }
+}
+
 void radv_CmdBindIndexBuffer(
        VkCommandBuffer                             commandBuffer,
        VkBuffer buffer,
@@ -3202,12 +3296,12 @@ void radv_CmdBindIndexBuffer(
 
        cmd_buffer->state.index_buffer = index_buffer;
        cmd_buffer->state.index_offset = offset;
-       cmd_buffer->state.index_type = indexType; /* vk matches hw */
+       cmd_buffer->state.index_type = vk_to_index_type(indexType);
        cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
        cmd_buffer->state.index_va += index_buffer->offset + offset;
 
-       int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
-       cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
+       int index_size = radv_get_vgt_index_size(indexType);
+       cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
        radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
 }
@@ -3376,7 +3470,7 @@ void radv_CmdPushDescriptorSetKHR(
         * because it is invalid, according to Vulkan spec.
         */
        for (int i = 0; i < descriptorWriteCount; i++) {
-               MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
+               ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
                assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
        }
 
@@ -3557,7 +3651,7 @@ void radv_CmdSetViewport(
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        struct radv_cmd_state *state = &cmd_buffer->state;
-       MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
+       ASSERTED const uint32_t total_count = firstViewport + viewportCount;
 
        assert(firstViewport < MAX_VIEWPORTS);
        assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
@@ -3581,7 +3675,7 @@ void radv_CmdSetScissor(
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        struct radv_cmd_state *state = &cmd_buffer->state;
-       MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
+       ASSERTED const uint32_t total_count = firstScissor + scissorCount;
 
        assert(firstScissor < MAX_SCISSORS);
        assert(total_count >= 1 && total_count <= MAX_SCISSORS);
@@ -3743,7 +3837,7 @@ void radv_CmdSetDiscardRectangleEXT(
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        struct radv_cmd_state *state = &cmd_buffer->state;
-       MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+       ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
 
        assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
        assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
@@ -3966,7 +4060,7 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
        struct radv_cmd_state *state = &cmd_buffer->state;
        struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+       ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                           cmd_buffer->cs, 4096);
 
        radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
@@ -4226,7 +4320,7 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
                }
 
                if (info->indexed) {
-                       int index_size = state->index_type ? 4 : 2;
+                       int index_size = radv_get_vgt_index_size(state->index_type);
                        uint64_t index_va;
 
                        index_va = state->index_va;
@@ -4305,8 +4399,11 @@ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
        if (cmd_buffer->state.dirty & used_states)
                return true;
 
+       uint32_t primitive_reset_index =
+               radv_get_primitive_reset_index(cmd_buffer);
+
        if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
-           (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
+           primitive_reset_index != state->last_primitive_reset_index)
                return true;
 
        return false;
@@ -4368,7 +4465,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer,
                (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
                cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
 
-       MAYBE_UNUSED unsigned cdw_max =
+       ASSERTED unsigned cdw_max =
                radeon_check_space(cmd_buffer->device->ws,
                                   cmd_buffer->cs, 4096);
 
@@ -4623,7 +4720,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
        loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
                                    AC_UD_CS_GRID_SIZE);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+       ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
 
        if (info->indirect) {
                uint64_t va = radv_buffer_get_va(info->indirect->bo);
@@ -5240,7 +5337,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,
 
                radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
 
-               MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+               ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
 
                radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
                assert(cmd_buffer->cs->cdw <= cdw_max);
@@ -5359,7 +5456,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
 
        radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
+       ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
 
        /* Flags that only require a top-of-pipe event. */
        VkPipelineStageFlags top_of_pipe_flags =