X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=4f592bc7f68829fbd2828a18c268cb135a64903b;hb=bc4548ca3dfbaf8ceb98684c66c344b8de14d655;hp=dcd20595eebd22856da16a1095d07353d4fbb59a;hpb=41199e2eebd4f98d34920a6310ce940a75ef6345;p=mesa.git diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index dcd20595eeb..4f592bc7f68 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -215,7 +215,7 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer) { return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE && - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; } enum ring_type radv_queue_family_to_ring(int f) { @@ -301,7 +301,6 @@ radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) static VkResult radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) { - cmd_buffer->device->ws->cs_reset(cmd_buffer->cs); list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, @@ -326,6 +325,8 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->record_result = VK_SUCCESS; + memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings)); + for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) { cmd_buffer->descriptors[i].dirty = 0; cmd_buffer->descriptors[i].valid = 0; @@ -338,14 +339,15 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) unsigned fence_offset, eop_bug_offset; void *fence_ptr; - radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, &fence_offset, + radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset, &fence_ptr); + cmd_buffer->gfx9_fence_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); cmd_buffer->gfx9_fence_va += fence_offset; /* Allocate a buffer for the EOP bug on GFX9. 
*/ - radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0, + radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, &eop_bug_offset, &fence_ptr); cmd_buffer->gfx9_eop_bug_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -416,6 +418,8 @@ radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer, unsigned *out_offset, void **ptr) { + assert(util_is_power_of_two_nonzero(alignment)); + uint64_t offset = align(cmd_buffer->upload.offset, alignment); if (offset + size > cmd_buffer->upload.size) { if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size)) @@ -628,6 +632,23 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer, } } +static void +radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline, + gl_shader_stage stage, + int idx, int count, uint32_t *values) +{ + struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx); + uint32_t base_reg = pipeline->user_data_0[stage]; + if (loc->sgpr_idx == -1) + return; + + assert(loc->num_sgprs == count); + + radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count); + radeon_emit_array(cmd_buffer->cs, values, count); +} + static void radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pipeline) @@ -648,7 +669,7 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); - radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples); + radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); /* GFX9: Flush DFSM when the AA mode changes. */ if (cmd_buffer->device->dfsm_allowed) { @@ -1020,7 +1041,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout layout) { - bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI; + bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; @@ -1050,7 +1071,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32)); radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4, - S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch)); + cb->cb_mrt_epitch); } else { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1205,10 +1226,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, if (!framebuffer || !subpass) return; - att_idx = subpass->depth_stencil_attachment.attachment; - if (att_idx == VK_ATTACHMENT_UNUSED) + if (!subpass->depth_stencil_attachment) return; + att_idx = subpass->depth_stencil_attachment->attachment; att = &framebuffer->attachments[att_idx]; if (att->attachment->image != image) return; @@ -1222,7 +1243,7 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, */ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && ds_clear_value.depth == 0.0) { - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + VkImageLayout layout = subpass->depth_stencil_attachment->layout; radv_update_zrange_precision(cmd_buffer, &att->ds, image, layout, false); @@ -1255,7 +1276,7 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); + 
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); @@ -1279,7 +1300,7 @@ radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, uint64_t va = radv_buffer_get_va(image->bo); va += image->offset + image->tc_compat_zrange_offset; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); @@ -1356,7 +1377,7 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); @@ -1473,7 +1494,7 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); @@ -1518,14 +1539,13 @@ radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); radeon_emit(cs, 2); } else { - /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating)); radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | COPY_DATA_DST_SEL(COPY_DATA_REG) | @@ -1566,18 +1586,19 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT); + assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); radv_emit_fb_color_state(cmd_buffer, i, att, image, layout); radv_load_color_clear_metadata(cmd_buffer, image, i); - if (image->surface.bpe >= 8) + if (image->planes[0].surface.bpe >= 8) num_bpp64_colorbufs++; } - if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - int idx = subpass->depth_stencil_attachment.attachment; - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + if (subpass->depth_stencil_attachment) { + int idx = subpass->depth_stencil_attachment->attachment; + VkImageLayout layout = subpass->depth_stencil_attachment->layout; struct radv_attachment_info *att = &framebuffer->attachments[idx]; struct radv_image *image = att->attachment->image; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); @@ -1608,8 +1629,8 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_X(framebuffer->width) | S_028208_BR_Y(framebuffer->height)); - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { - uint8_t watermark = 4; /* Default value for VI. 
*/ + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { + uint8_t watermark = 4; /* Default value for GFX8. */ /* For optimal DCC performance. */ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { @@ -1670,7 +1691,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) uint32_t db_count_control; if(!cmd_buffer->state.active_occlusion_queries) { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) && pipeline->graphics.disable_out_of_order_rast_for_occlusion && has_perfect_queries) { @@ -1689,7 +1710,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | S_028004_SAMPLE_RATE(sample_rate) | @@ -1901,6 +1922,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, radv_get_descriptors_state(cmd_buffer, bind_point); struct radv_pipeline_layout *layout = pipeline->layout; struct radv_shader_variant *shader, *prev_shader; + bool need_push_constants = false; unsigned offset; void *ptr; uint64_t va; @@ -1910,37 +1932,56 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, (!layout->push_constant_size && !layout->dynamic_offset_count)) return; - if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + - 16 * layout->dynamic_offset_count, - 256, &offset, &ptr)) - return; + radv_foreach_stage(stage, stages) { + if (!pipeline->shaders[stage]) + continue; - memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); - memcpy((char*)ptr + layout->push_constant_size, - descriptors_state->dynamic_buffers, - 16 * layout->dynamic_offset_count); + need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants; + need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets; - va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); - va += offset; + uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts; + uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts; - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, MESA_SHADER_STAGES * 4); + radv_emit_inline_push_consts(cmd_buffer, pipeline, stage, + AC_UD_INLINE_PUSH_CONSTANTS, + count, + (uint32_t *)&cmd_buffer->push_constants[base * 4]); + } - prev_shader = NULL; - radv_foreach_stage(stage, stages) { - shader = radv_get_shader(pipeline, stage); + if (need_push_constants) { + if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + + 16 * layout->dynamic_offset_count, + 256, &offset, &ptr)) + return; + + memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); + memcpy((char*)ptr + layout->push_constant_size, + descriptors_state->dynamic_buffers, + 16 * layout->dynamic_offset_count); + + va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); + va += offset; - /* Avoid redundantly emitting the address for merged stages. 
*/ - if (shader && shader != prev_shader) { - radv_emit_userdata_address(cmd_buffer, pipeline, stage, - AC_UD_PUSH_CONSTANTS, va); + MAYBE_UNUSED unsigned cdw_max = + radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, MESA_SHADER_STAGES * 4); - prev_shader = shader; + prev_shader = NULL; + radv_foreach_stage(stage, stages) { + shader = radv_get_shader(pipeline, stage); + + /* Avoid redundantly emitting the address for merged stages. */ + if (shader && shader != prev_shader) { + radv_emit_userdata_address(cmd_buffer, pipeline, stage, + AC_UD_PUSH_CONSTANTS, va); + + prev_shader = shader; + } } + assert(cmd_buffer->cs->cdw <= cdw_max); } cmd_buffer->push_constant_stages &= ~stages; - assert(cmd_buffer->cs->cdw <= cdw_max); } static void @@ -1949,13 +1990,13 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, { if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && - cmd_buffer->state.pipeline->vertex_elements.count && + cmd_buffer->state.pipeline->num_vertex_bindings && radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; unsigned vb_offset; void *vb_ptr; uint32_t i = 0; - uint32_t count = velems->count; + uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings; uint64_t va; /* allocate some descriptor state for vertex buffers */ @@ -1966,21 +2007,28 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, for (i = 0; i < count; i++) { uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; uint32_t offset; - int vb = velems->binding[i]; - struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer; - uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; + struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; + uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + + if (!buffer) + continue; va = radv_buffer_get_va(buffer->bo); - offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i]; + offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride) + if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; else desc[2] = buffer->size - offset; - desc[3] = velems->rsrc_word3[i]; + desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | + S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2058,7 +2106,7 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) /* Set the descriptor. * - * On VI, the format must be non-INVALID, otherwise + * On GFX8, the format must be non-INVALID, otherwise * the buffer will be considered not bound and store * instructions will be no-ops. */ @@ -2155,6 +2203,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, draw_info->indirect, + !!draw_info->strmout_buffer, draw_info->indirect ? 
0 : draw_info->count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { @@ -2162,7 +2211,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); - } else if (info->chip_class >= CIK) { + } else if (info->chip_class >= GFX7) { radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param); @@ -2425,28 +2474,8 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf void radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, - const struct radv_subpass *subpass, bool transitions) + const struct radv_subpass *subpass) { - if (transitions) { - radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); - - for (unsigned i = 0; i < subpass->color_count; ++i) { - if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) - radv_handle_subpass_image_transition(cmd_buffer, - subpass->color_attachments[i]); - } - - for (unsigned i = 0; i < subpass->input_count; ++i) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->input_attachments[i]); - } - - if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->depth_stencil_attachment); - } - } - cmd_buffer->state.subpass = subpass; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; @@ -2629,7 +2658,7 @@ VkResult radv_BeginCommandBuffer( if (result != VK_SUCCESS) return result; - radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false); + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); } if (unlikely(cmd_buffer->device->trace_bo)) { @@ -2919,7 +2948,7 @@ VkResult radv_EndCommandBuffer( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { - if (cmd_buffer->device->physical_device->rad_info.chip_class == SI) + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; si_emit_cache_flush(cmd_buffer); } @@ -3409,6 +3438,69 @@ void radv_TrimCommandPool( } } +static uint32_t +radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t subpass_id = state->subpass - state->pass->subpasses; + + /* The id of this subpass shouldn't exceed the number of subpasses in + * this render pass minus 1. 
+ */ + assert(subpass_id < state->pass->subpass_count); + return subpass_id; +} + +static void +radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, + uint32_t subpass_id) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; + + MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, 4096); + + radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + radv_handle_subpass_image_transition(cmd_buffer, + subpass->attachments[i]); + } + + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); + radv_cmd_buffer_clear_subpass(cmd_buffer); + + assert(cmd_buffer->cs->cdw <= cdw_max); +} + +static void +radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + const struct radv_subpass *subpass = state->subpass; + uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); + + radv_cmd_buffer_resolve_subpass(cmd_buffer); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + if (state->pass->attachments[a].last_subpass_idx != subpass_id) + continue; + + VkImageLayout layout = state->pass->attachments[a].final_layout; + radv_handle_subpass_image_transition(cmd_buffer, + (struct radv_subpass_attachment){a, layout}); + } +} + void radv_CmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, @@ -3417,9 +3509,6 @@ void radv_CmdBeginRenderPass( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass); RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); - - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, 2048); VkResult result; cmd_buffer->state.framebuffer = framebuffer; @@ -3430,10 +3519,7 @@ void radv_CmdBeginRenderPass( if (result != VK_SUCCESS) return; - radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true); - assert(cmd_buffer->cs->cdw <= cdw_max); - - radv_cmd_buffer_clear_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } void radv_CmdBeginRenderPass2KHR( @@ -3451,13 +3537,9 @@ void radv_CmdNextSubpass( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, - 2048); - - radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true); - radv_cmd_buffer_clear_subpass(cmd_buffer); + uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer); + radv_cmd_buffer_end_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1); } void radv_CmdNextSubpass2KHR( @@ -3742,11 +3824,11 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER) radv_emit_index_buffer(cmd_buffer); } else { - /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE, + /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE, * so the state must be re-emitted before the next indexed * draw. 
*/ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { cmd_buffer->state.last_index_type = -1; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; } @@ -3767,7 +3849,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, struct radeon_info *rad_info = &cmd_buffer->device->physical_device->rad_info; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; @@ -3777,7 +3859,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->cs, 4096); if (likely(!info->indirect)) { - /* SI-CI treat instance_count==0 as instance_count==1. There is + /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is * no workaround for indirect draws, but we can at least skip * direct draws. */ @@ -3940,55 +4022,6 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - -void radv_CmdDrawIndexedIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.indexed = true; - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - void radv_CmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer _buffer, @@ -4204,7 +4237,7 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer, { struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = pipeline && pipeline != cmd_buffer->state.emitted_compute_pipeline; @@ -4325,13 +4358,7 @@ void radv_CmdEndRenderPass( radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) { - VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout; - radv_handle_subpass_image_transition(cmd_buffer, - (struct radv_subpass_attachment){i, layout}); - } + radv_cmd_buffer_end_subpass(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); @@ -4363,10 +4390,10 @@ static void 
radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); unsigned layer_count = radv_get_layerCount(image, range); - uint64_t size = image->surface.htile_slice_size * layer_count; + uint64_t size = image->planes[0].surface.htile_slice_size * layer_count; VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; uint64_t offset = image->offset + image->htile_offset + - image->surface.htile_slice_size * range->baseArrayLayer; + image->planes[0].surface.htile_slice_size * range->baseArrayLayer; struct radv_cmd_state *state = &cmd_buffer->state; VkClearDepthStencilValue value = {}; @@ -4404,10 +4431,15 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe if (!radv_image_has_htile(image)) return; - if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED && - radv_layout_has_htile(image, dst_layout, dst_queue_mask)) { - /* TODO: merge with the clear if applicable */ - radv_initialize_htile(cmd_buffer, image, range, 0); + if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { + uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; + + if (radv_layout_is_htile_compressed(image, dst_layout, + dst_queue_mask)) { + clear_value = 0; + } + + radv_initialize_htile(cmd_buffer, image, range, clear_value); } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; @@ -4553,17 +4585,28 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { + bool fce_eliminate = false, fmask_expand = false; + if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { - radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + fce_eliminate = true; } if (radv_image_has_fmask(image)) { if (src_layout != VK_IMAGE_LAYOUT_GENERAL && dst_layout == VK_IMAGE_LAYOUT_GENERAL) { - radv_expand_fmask_image_inplace(cmd_buffer, image, range); + /* A FMASK decompress is required before doing + * a MSAA decompress using FMASK. + */ + fmask_expand = true; } } + + if (fce_eliminate || fmask_expand) + radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + + if (fmask_expand) + radv_expand_fmask_image_inplace(cmd_buffer, image, range); } } @@ -4619,6 +4662,7 @@ struct radv_barrier_info { uint32_t eventCount; const VkEvent *pEvents; VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; }; static void @@ -4670,7 +4714,19 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, image); } - radv_stage_flush(cmd_buffer, info->srcStageMask); + /* The Vulkan spec 1.1.98 says: + * + * "An execution dependency with only + * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask + * will only prevent that stage from executing in subsequently + * submitted commands. As this stage does not perform any actual + * execution, this is not observable - in effect, it does not delay + * processing of subsequent commands. Similarly an execution dependency + * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask + * will effectively not wait for any prior commands to complete." 
+ */ + if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) + radv_stage_flush(cmd_buffer, info->srcStageMask); cmd_buffer->state.flush_bits |= src_flush_bits; for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { @@ -4711,6 +4767,7 @@ void radv_CmdPipelineBarrier( info.eventCount = 0; info.pEvents = NULL; info.srcStageMask = srcStageMask; + info.dstStageMask = destStageMask; radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, @@ -4840,8 +4897,11 @@ void radv_CmdBeginConditionalRenderingEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; bool draw_visible = true; - uint64_t va; + uint64_t pred_value = 0; + uint64_t va, new_va; + unsigned pred_offset; va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset; @@ -4857,13 +4917,51 @@ void radv_CmdBeginConditionalRenderingEXT( si_emit_cache_flush(cmd_buffer); + /* From the Vulkan spec 1.1.107: + * + * "If the 32-bit value at offset in buffer memory is zero, then the + * rendering commands are discarded, otherwise they are executed as + * normal. If the value of the predicate in buffer memory changes while + * conditional rendering is active, the rendering commands may be + * discarded in an implementation-dependent way. Some implementations + * may latch the value of the predicate upon beginning conditional + * rendering while others may read it before every rendering command." + * + * But, the AMD hardware treats the predicate as a 64-bit value which + * means we need a workaround in the driver. Luckily, it's not required + * to support if the value changes when predication is active. + * + * The workaround is as follows: + * 1) allocate a 64-value in the upload BO and initialize it to 0 + * 2) copy the 32-bit predicate value to the upload BO + * 3) use the new allocated VA address for predication + * + * Based on the conditionalrender demo, it's faster to do the COPY_DATA + * in ME (+ sync PFP) instead of PFP. + */ + radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset); + + new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset; + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, new_va); + radeon_emit(cs, new_va >> 32); + + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); + radeon_emit(cs, 0); + /* Enable predication for this command buffer. */ - si_emit_set_predication_state(cmd_buffer, draw_visible, va); + si_emit_set_predication_state(cmd_buffer, draw_visible, new_va); cmd_buffer->state.predicating = true; /* Store conditional rendering user info. */ cmd_buffer->state.predication_type = draw_visible; - cmd_buffer->state.predication_va = va; + cmd_buffer->state.predication_va = new_va; } void radv_CmdEndConditionalRenderingEXT( @@ -4907,7 +5005,7 @@ void radv_CmdBindTransformFeedbackBuffersEXT( enabled_mask |= 1 << idx; } - cmd_buffer->state.streamout.enabled_mask = enabled_mask; + cmd_buffer->state.streamout.enabled_mask |= enabled_mask; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER; } @@ -4956,7 +5054,7 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) unsigned reg_strmout_cntl; /* The register is at different places on different ASICs. 
*/ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL; radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0); } else { @@ -4997,7 +5095,7 @@ void radv_CmdBeginTransformFeedbackEXT( if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) counter_buffer_idx = -1; - /* SI binds streamout buffers as shader resources. + /* AMD GCN binds streamout buffers as shader resources. * VGT only counts primitives and tells the shader through * SGPRs what to do. */
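
Note on the conditional-rendering hunk above: the CP predication packet consumes a 64-bit value, while VK_EXT_conditional_rendering only defines a 32-bit predicate, hence the zero-initialized 8-byte slot in the upload BO. The standalone host-side sketch below is only meant to illustrate why that slot is needed; it is not driver code. The names app_buffer and upload_slot are hypothetical, the GPU COPY_DATA packet is modelled as a plain memcpy, and the predicate fetch as a 64-bit load on a little-endian host (matching the GPU).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Application-side buffer: a 32-bit predicate of 0 ("discard the
	 * draws") followed by an unrelated dword. */
	uint32_t app_buffer[2] = { 0, 0xdeadbeef };

	/* What a straight 64-bit read from the predicate offset would see:
	 * the unrelated dword leaks into the upper half, the value is
	 * non-zero, and the draws would not be discarded. */
	uint64_t raw64;
	memcpy(&raw64, app_buffer, sizeof(raw64));
	printf("naive 64-bit read:  0x%016" PRIx64 "\n", raw64);

	/* Workaround (steps 1 and 2 from the comment in the patch): an
	 * 8-byte slot initialized to zero, with only the low 32 bits
	 * overwritten by the copy.  The upper half is guaranteed to be
	 * zero, so the 64-bit predicate matches the 32-bit value the
	 * application wrote. */
	uint64_t upload_slot = 0;
	memcpy(&upload_slot, &app_buffer[0], sizeof(uint32_t));
	printf("zero-extended read: 0x%016" PRIx64 "\n", upload_slot);

	return 0;
}

In the patch itself the copy is done on the GPU instead: a COPY_DATA packet executed by the ME writes the low dword of the upload-BO slot, PFP_SYNC_ME makes the PFP wait for it, and predication is then pointed at the new VA.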