X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=20ba975052bc6c35450d103387ccdcbbf3033c27;hb=1b8d99e2885456dcd2d9309f6e1bd7f60d30ed75;hp=215ccced1449a15af382d05ab1081f6e335f430d;hpb=946193ae00832fd26d883a1df7d695e64db6b1a5;p=mesa.git diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index 215ccced144..20ba975052b 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -54,7 +54,9 @@ enum { static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, @@ -220,10 +222,11 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radv_shader_info *info; - if (!pipeline->streamout_shader) + if (!pipeline->streamout_shader || + cmd_buffer->device->physical_device->use_ngg_streamout) return; - info = &pipeline->streamout_shader->info.info; + info = &pipeline->streamout_shader->info; for (int i = 0; i < MAX_SO_BUFFERS; i++) so->stride_in_dw[i] = info->so.strides[i]; @@ -329,11 +332,14 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) } cmd_buffer->push_constant_stages = 0; - cmd_buffer->scratch_size_needed = 0; - cmd_buffer->compute_scratch_size_needed = 0; + cmd_buffer->scratch_size_per_wave_needed = 0; + cmd_buffer->scratch_waves_wanted = 0; + cmd_buffer->compute_scratch_size_per_wave_needed = 0; + cmd_buffer->compute_scratch_waves_wanted = 0; cmd_buffer->esgs_ring_size_needed = 0; cmd_buffer->gsvs_ring_size_needed = 0; cmd_buffer->tess_rings_needed = false; + cmd_buffer->gds_needed = false; cmd_buffer->sample_positions_needed = false; if (cmd_buffer->upload.upload_bo) @@ -364,12 +370,14 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) radv_buffer_get_va(cmd_buffer->upload.upload_bo); cmd_buffer->gfx9_fence_va += fence_offset; - /* Allocate a buffer for the EOP bug on GFX9. */ - radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, - &eop_bug_offset, &fence_ptr); - cmd_buffer->gfx9_eop_bug_va = - radv_buffer_get_va(cmd_buffer->upload.upload_bo); - cmd_buffer->gfx9_eop_bug_va += eop_bug_offset; + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { + /* Allocate a buffer for the EOP bug on GFX9. 
*/ + radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, + &eop_bug_offset, &fence_ptr); + cmd_buffer->gfx9_eop_bug_va = + radv_buffer_get_va(cmd_buffer->upload.upload_bo); + cmd_buffer->gfx9_eop_bug_va += eop_bug_offset; + } } cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL; @@ -550,8 +558,9 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer, assert(!"invalid ring type"); } - data[0] = (uintptr_t)pipeline; - data[1] = (uintptr_t)pipeline >> 32; + uint64_t pipeline_address = (uintptr_t)pipeline; + data[0] = pipeline_address; + data[1] = pipeline_address >> 32; radv_emit_write_data_packet(cmd_buffer, va, 2, data); } @@ -856,32 +865,60 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pipeline) { int num_samples = pipeline->graphics.ms.num_samples; - struct radv_multisample_state *ms = &pipeline->graphics.ms; struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline; - if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) + if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions) cmd_buffer->sample_positions_needed = true; if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples) return; - radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2); - radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl); - radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config); + radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); - radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} - radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); +static void +radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline) +{ + const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline; - /* GFX9: Flush DFSM when the AA mode changes. 
*/ - if (cmd_buffer->device->dfsm_allowed) { - radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); - radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); + + if (pipeline->device->physical_device->rad_info.chip_class < GFX9) + return; + + if (old_pipeline && + old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 && + old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control) + return; + + bool binning_flush = false; + if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 || + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + binning_flush = !old_pipeline || + G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) != + G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0); + } + + radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0, + pipeline->graphics.binning.pa_sc_binner_cntl_0 | + S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush)); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); + } else { + radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); } cmd_buffer->state.context_roll_without_scissor_emitted = true; } + static void radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer, struct radv_shader_variant *shader) @@ -929,7 +966,8 @@ radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer, if (mask & RADV_PREFETCH_GS) { radv_emit_shader_prefetch(cmd_buffer, pipeline->shaders[MESA_SHADER_GEOMETRY]); - radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); + if (radv_pipeline_has_gs_copy_shader(pipeline)) + radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); } if (mask & RADV_PREFETCH_PS) @@ -942,17 +980,19 @@ radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer, static void radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) { - if (!cmd_buffer->device->physical_device->rbplus_allowed) + if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed) return; struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; unsigned sx_ps_downconvert = 0; unsigned sx_blend_opt_epsilon = 0; unsigned sx_blend_opt_control = 0; + if (!cmd_buffer->state.attachments || !subpass) + return; + for (unsigned i = 0; i < subpass->color_count; ++i) { if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) { sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4); @@ -961,7 +1001,7 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) } int idx = subpass->color_attachments[i].attachment; - struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb; + struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb; unsigned format = G_028C70_FORMAT(cb->cb_color_info); unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info); @@ -1085,6 +1125,33 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.context_roll_without_scissor_emitted = true; } +static void +radv_emit_batch_break_on_new_ps(struct 
radv_cmd_buffer *cmd_buffer) +{ + if (!cmd_buffer->device->pbb_allowed) + return; + + struct radv_binning_settings settings = + radv_get_binning_settings(cmd_buffer->device->physical_device); + bool break_for_new_ps = + (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->shaders[MESA_SHADER_FRAGMENT] != + cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]) && + (settings.context_states_per_bin > 1 || + settings.persistent_states_per_bin > 1); + bool break_for_new_cb_target_mask = + (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->graphics.cb_target_mask != + cmd_buffer->state.pipeline->graphics.cb_target_mask) && + settings.context_states_per_bin > 1; + + if (!break_for_new_ps && !break_for_new_cb_target_mask) + return; + + radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); +} + static void radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) { @@ -1094,10 +1161,12 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) return; radv_update_multisample_state(cmd_buffer, pipeline); + radv_update_binning_state(cmd_buffer, pipeline); - cmd_buffer->scratch_size_needed = - MAX2(cmd_buffer->scratch_size_needed, - pipeline->max_waves * pipeline->scratch_bytes_per_wave); + cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed, + pipeline->scratch_bytes_per_wave); + cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted, + pipeline->max_waves); if (!cmd_buffer->state.emitted_pipeline || cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband != @@ -1115,6 +1184,8 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.context_roll_without_scissor_emitted = true; } + radv_emit_batch_break_on_new_ps(cmd_buffer); + for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) { if (!pipeline->shaders[i]) continue; @@ -1123,7 +1194,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) pipeline->shaders[i]->bo); } - if (radv_pipeline_has_gs(pipeline)) + if (radv_pipeline_has_gs_copy_shader(pipeline)) radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->gs_copy_shader->bo); @@ -1239,16 +1310,16 @@ radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer) static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, - struct radv_attachment_info *att, + struct radv_color_buffer_info *cb, struct radv_image_view *iview, - VkImageLayout layout) + VkImageLayout layout, + bool in_render_loop) { bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; - struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; struct radv_image *image = iview->image; - if (!radv_layout_dcc_compressed(image, layout, + if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop, radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index))) { @@ -1264,7 +1335,36 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; } - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); + radeon_emit(cmd_buffer->cs, cb->cb_color_base); + radeon_emit(cmd_buffer->cs, 0); + 
radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_view); + radeon_emit(cmd_buffer->cs, cb_color_info); + radeon_emit(cmd_buffer->cs, cb->cb_color_attrib); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_control); + radeon_emit(cmd_buffer->cs, cb->cb_color_cmask); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_fmask); + radeon_emit(cmd_buffer->cs, 0); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_base); + + radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4, + cb->cb_color_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4, + cb->cb_color_cmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4, + cb->cb_color_fmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4, + cb->cb_dcc_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4, + cb->cb_color_attrib2); + radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4, + cb->cb_color_attrib3); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32)); @@ -1320,16 +1420,19 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, static void radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, struct radv_ds_buffer_info *ds, - struct radv_image *image, VkImageLayout layout, - bool requires_cond_exec) + const struct radv_image_view *iview, + VkImageLayout layout, + bool in_render_loop, bool requires_cond_exec) { + const struct radv_image *image = iview->image; uint32_t db_z_info = ds->db_z_info; uint32_t db_z_info_reg; - if (!radv_image_is_tc_compat_htile(image)) + if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug || + !radv_image_is_tc_compat_htile(image)) return; - if (!radv_layout_has_htile(image, layout, + if (!radv_layout_has_htile(image, layout, in_render_loop, radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index))) { @@ -1338,7 +1441,7 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, db_z_info &= C_028040_ZRANGE_PRECISION; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { db_z_info_reg = R_028038_DB_Z_INFO; } else { db_z_info_reg = R_028040_DB_Z_INFO; @@ -1349,8 +1452,7 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, * SET_CONTEXT_REG packet. 
*/ if (requires_cond_exec) { - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; + uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip); radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0)); radeon_emit(cmd_buffer->cs, va); @@ -1365,13 +1467,15 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, static void radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, struct radv_ds_buffer_info *ds, - struct radv_image *image, - VkImageLayout layout) + struct radv_image_view *iview, + VkImageLayout layout, + bool in_render_loop) { + const struct radv_image *image = iview->image; uint32_t db_z_info = ds->db_z_info; uint32_t db_stencil_info = ds->db_stencil_info; - if (!radv_layout_has_htile(image, layout, + if (!radv_layout_has_htile(image, layout, in_render_loop, radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index))) { @@ -1382,8 +1486,26 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view); radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface); - - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base); + radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7); + radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1)); + radeon_emit(cmd_buffer->cs, db_z_info); + radeon_emit(cmd_buffer->cs, db_stencil_info); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3); radeon_emit(cmd_buffer->cs, ds->db_htile_data_base); radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32)); @@ -1421,7 +1543,8 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, } /* Update the ZRANGE_PRECISION value for the TC-compat bug. 
*/ - radv_update_zrange_precision(cmd_buffer, ds, image, layout, true); + radv_update_zrange_precision(cmd_buffer, ds, iview, layout, + in_render_loop, true); radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL, ds->pa_su_poly_offset_db_fmt_cntl); @@ -1433,30 +1556,38 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, */ static void radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; + const struct radv_image *image = iview->image; struct radeon_cmdbuf *cs = cmd_buffer->cs; - struct radv_attachment_info *att; uint32_t att_idx; - if (!framebuffer || !subpass) + if (!cmd_buffer->state.attachments || !subpass) return; if (!subpass->depth_stencil_attachment) return; att_idx = subpass->depth_stencil_attachment->attachment; - att = &framebuffer->attachments[att_idx]; - if (att->attachment->image != image) + if (cmd_buffer->state.attachments[att_idx].iview->image != image) return; - radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2); - radeon_emit(cs, ds_clear_value.stencil); - radeon_emit(cs, fui(ds_clear_value.depth)); + if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT)) { + radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2); + radeon_emit(cs, ds_clear_value.stencil); + radeon_emit(cs, fui(ds_clear_value.depth)); + } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) { + radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1); + radeon_emit(cs, fui(ds_clear_value.depth)); + } else { + assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT); + radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1); + radeon_emit(cs, ds_clear_value.stencil); + } /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is * only needed when clearing Z to 0.0. @@ -1464,9 +1595,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && ds_clear_value.depth == 0.0) { VkImageLayout layout = subpass->depth_stencil_attachment->layout; + bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop; - radv_update_zrange_precision(cmd_buffer, &att->ds, image, - layout, false); + radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds, + iview, layout, in_render_loop, false); } cmd_buffer->state.context_roll_without_scissor_emitted = true; @@ -1478,34 +1610,51 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, static void radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - unsigned reg_offset = 0, reg_count = 0; + uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); - va += image->offset + image->clear_value_offset; + if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT)) { + /* Use the fastest way when both aspects are used. 
*/ + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); - if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) { - ++reg_count; + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cs, ds_clear_value.stencil); + radeon_emit(cs, fui(ds_clear_value.depth)); + } } else { - ++reg_offset; - va += 4; - } - if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) - ++reg_count; + /* Otherwise we need one WRITE_DATA packet per level. */ + for (uint32_t l = 0; l < level_count; l++) { + uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l); + unsigned value; + + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) { + value = fui(ds_clear_value.depth); + va += 4; + } else { + assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT); + value = ds_clear_value.stencil; + } - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | - S_370_WR_CONFIRM(1) | - S_370_ENGINE_SEL(V_370_PFP)); - radeon_emit(cs, va); - radeon_emit(cs, va >> 32); - if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) - radeon_emit(cs, ds_clear_value.stencil); - if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) - radeon_emit(cs, fui(ds_clear_value.depth)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, value); + } + } } /** @@ -1514,28 +1663,40 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, static void radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t value) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug) + return; + + uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, value); + + for (uint32_t l = 0; l < level_count; l++) + radeon_emit(cs, value); } static void radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value) { - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; uint32_t cond_val; /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last @@ -1543,7 +1704,8 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, */ cond_val = ds_clear_value.depth == 0.0f ? 
UINT_MAX : 0; - radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val); + radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range, + cond_val); } /** @@ -1551,22 +1713,32 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + struct radv_image *image = iview->image; + assert(radv_image_has_htile(image)); - radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects); + radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range, + ds_clear_value, aspects); if (radv_image_is_tc_compat_htile(image) && (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { - radv_update_tc_compat_zrange_metadata(cmd_buffer, image, + radv_update_tc_compat_zrange_metadata(cmd_buffer, iview, ds_clear_value); } - radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value, - aspects); + radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value, + aspects); } /** @@ -1574,15 +1746,14 @@ radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image) + const struct radv_image_view *iview) { struct radeon_cmdbuf *cs = cmd_buffer->cs; + const struct radv_image *image = iview->image; VkImageAspectFlags aspects = vk_format_aspects(image->vk_format); - uint64_t va = radv_buffer_get_va(image->bo); + uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip); unsigned reg_offset = 0, reg_count = 0; - va += image->offset + image->clear_value_offset; - if (!radv_image_has_htile(image)) return; @@ -1597,7 +1768,7 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { + if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); @@ -1685,21 +1856,18 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, int cb_idx, uint32_t color_values[2]) { - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; struct radeon_cmdbuf *cs = cmd_buffer->cs; - struct radv_attachment_info *att; uint32_t att_idx; - if (!framebuffer || !subpass) + if (!cmd_buffer->state.attachments || !subpass) return; att_idx = subpass->color_attachments[cb_idx].attachment; if (att_idx == VK_ATTACHMENT_UNUSED) return; - att = &framebuffer->attachments[att_idx]; - if (att->attachment->image != image) + if (cmd_buffer->state.attachments[att_idx].iview->image != image) return; radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2); @@ -1784,7 +1952,7 @@ radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { + if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); 
radeon_emit(cs, va); radeon_emit(cs, va >> 32); @@ -1811,7 +1979,6 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int i; struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; - unsigned num_bpp64_colorbufs = 0; /* this may happen for inherited secondary recording */ if (!framebuffer) @@ -1825,45 +1992,42 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) } int idx = subpass->color_attachments[i].attachment; - struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image_view *iview = att->attachment; - struct radv_image *image = iview->image; + struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview; VkImageLayout layout = subpass->color_attachments[i].layout; + bool in_render_loop = subpass->color_attachments[i].in_render_loop; - radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo); - assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); - radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); + radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop); radv_load_color_clear_metadata(cmd_buffer, iview, i); - - if (image->planes[0].surface.bpe >= 8) - num_bpp64_colorbufs++; } if (subpass->depth_stencil_attachment) { int idx = subpass->depth_stencil_attachment->attachment; VkImageLayout layout = subpass->depth_stencil_attachment->layout; - struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; - radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image, + bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop; + struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview; + struct radv_image *image = iview->image; + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo); + ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index); /* We currently don't support writing decompressed HTILE */ - assert(radv_layout_has_htile(image, layout, queue_mask) == - radv_layout_is_htile_compressed(image, layout, queue_mask)); + assert(radv_layout_has_htile(image, layout, in_render_loop, queue_mask) == + radv_layout_is_htile_compressed(image, layout, in_render_loop, queue_mask)); - radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout); + radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop); - if (att->ds.offset_scale != cmd_buffer->state.offset_scale) { + if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) { cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; - cmd_buffer->state.offset_scale = att->ds.offset_scale; + cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale; } - radv_load_ds_clear_metadata(cmd_buffer, image); + radv_load_ds_clear_metadata(cmd_buffer, iview); } else { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) + if 
(cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2); else radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2); @@ -1876,20 +2040,16 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_Y(framebuffer->height)); if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { - uint8_t watermark = 4; /* Default value for GFX8. */ - - /* For optimal DCC performance. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - if (num_bpp64_colorbufs >= 5) { - watermark = 8; - } else { - watermark = 6; - } - } + bool disable_constant_encode = + cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode; + enum chip_class chip_class = + cmd_buffer->device->physical_device->rad_info.chip_class; + uint8_t watermark = chip_class >= GFX10 ? 6 : 4; radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL, - S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) | - S_028424_OVERWRITE_COMBINER_WATERMARK(watermark)); + S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) | + S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) | + S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode)); } if (cmd_buffer->device->dfsm_allowed) { @@ -1908,7 +2068,8 @@ radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer) if (state->index_type != state->last_index_type) { if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE, + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_03090C_VGT_INDEX_TYPE, 2, state->index_type); } else { radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); @@ -1955,10 +2116,12 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) } else { const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? 
util_logbase2(subpass->max_sample_count) : 0; + bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries; if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | + S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) | S_028004_SAMPLE_RATE(sample_rate) | S_028004_ZPASS_ENABLE(1) | S_028004_SLICE_EVEN_ENABLE(1) | @@ -1998,7 +2161,7 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) radv_emit_viewport(cmd_buffer); if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) && - !cmd_buffer->device->physical_device->has_scissor_bug) + !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug) radv_emit_scissor(cmd_buffer); if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) @@ -2125,7 +2288,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, if (flush_indirect_descriptors) radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, MAX_SETS * MESA_SHADER_STAGES * 4); @@ -2182,14 +2345,15 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, return; radv_foreach_stage(stage, stages) { - if (!pipeline->shaders[stage]) + shader = radv_get_shader(pipeline, stage); + if (!shader) continue; - need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants; - need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets; + need_push_constants |= shader->info.loads_push_constants; + need_push_constants |= shader->info.loads_dynamic_offsets; - uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts; - uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts; + uint8_t base = shader->info.base_inline_push_consts; + uint8_t count = shader->info.num_inline_push_consts; radv_emit_inline_push_consts(cmd_buffer, pipeline, stage, AC_UD_INLINE_PUSH_CONSTANTS, @@ -2211,7 +2375,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); va += offset; - MAYBE_UNUSED unsigned cdw_max = + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, MESA_SHADER_STAGES * 4); @@ -2240,8 +2404,7 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && cmd_buffer->state.pipeline->num_vertex_bindings && - radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { - struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; + radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) { unsigned vb_offset; void *vb_ptr; uint32_t i = 0; @@ -2258,6 +2421,7 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, uint32_t offset; struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + unsigned num_records; if (!buffer) continue; @@ -2266,18 +2430,33 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; + + num_records = buffer->size - offset; + if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && 
stride) + num_records /= stride; + desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) - desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; - else - desc[2] = buffer->size - offset; + desc[2] = num_records; desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + /* OOB_SELECT chooses the out-of-bounds check: + * - 1: index >= NUM_RECORDS (Structured) + * - 3: offset >= NUM_RECORDS (Raw) + */ + int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW; + + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) | + S_008F0C_OOB_SELECT(oob_select) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2315,7 +2494,7 @@ radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va) base_reg + loc->sgpr_idx * 4, va, false); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS]; if (loc->sgpr_idx != -1) { base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -2359,14 +2538,30 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) * the buffer will be considered not bound and store * instructions will be no-ops. */ + uint32_t size = 0xffffffff; + + /* Compute the correct buffer size for NGG streamout + * because it's used to determine the max emit per + * buffer. 
+ */ + if (cmd_buffer->device->physical_device->use_ngg_streamout) + size = buffer->size - sb[i].offset; + desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32); - desc[2] = 0xffffffff; + desc[2] = size; desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2438,26 +2633,42 @@ struct radv_draw_info { uint64_t strmout_buffer_offset; }; +static uint32_t +radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer) +{ + switch (cmd_buffer->state.index_type) { + case V_028A7C_VGT_INDEX_8: + return 0xffu; + case V_028A7C_VGT_INDEX_16: + return 0xffffu; + case V_028A7C_VGT_INDEX_32: + return 0xffffffffu; + default: + unreachable("invalid index type"); + } +} + static void -radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, - const struct radv_draw_info *draw_info) +si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, + bool instanced_draw, bool indirect_draw, + bool count_from_stream_output, + uint32_t draw_vertex_count) { struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; struct radv_cmd_state *state = &cmd_buffer->state; struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint32_t ia_multi_vgt_param; - int32_t primitive_reset_en; + unsigned ia_multi_vgt_param; - /* Draw state. */ ia_multi_vgt_param = - si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, - draw_info->indirect, - !!draw_info->strmout_buffer, - draw_info->indirect ? 0 : draw_info->count); + si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, + indirect_draw, + count_from_stream_output, + draw_vertex_count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { - if (info->chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, + if (info->chip_class == GFX9) { + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); } else if (info->chip_class >= GFX7) { @@ -2470,6 +2681,24 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, } state->last_ia_multi_vgt_param = ia_multi_vgt_param; } +} + +static void +radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, + const struct radv_draw_info *draw_info) +{ + struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; + struct radv_cmd_state *state = &cmd_buffer->state; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + int32_t primitive_reset_en; + + /* Draw state. */ + if (info->chip_class < GFX10) { + si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, + draw_info->indirect, + !!draw_info->strmout_buffer, + draw_info->indirect ? 0 : draw_info->count); + } /* Primitive restart. */ primitive_reset_en = @@ -2490,7 +2719,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, if (primitive_reset_en) { uint32_t primitive_reset_index = - state->index_type ? 
0xffffffffu : 0xffffu; + radv_get_primitive_reset_index(cmd_buffer); if (primitive_reset_index != state->last_primitive_reset_index) { radeon_set_context_reg(cs, @@ -2573,7 +2802,7 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_SHADER_WRITE_BIT: case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT: case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: - flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_WB_L2; break; case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB; @@ -2588,7 +2817,7 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_TRANSFER_WRITE_BIT: flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB | - RADV_CMD_FLAG_INV_GLOBAL_L2; + RADV_CMD_FLAG_INV_L2; if (flush_CB_meta) flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -2624,7 +2853,9 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, if (!radv_image_has_htile(image)) flush_DB_meta = false; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + /* TODO: implement shader coherent for GFX10 */ + + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { if (image->info.samples == 1 && (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) && @@ -2645,19 +2876,24 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: break; case VK_ACCESS_UNIFORM_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE; break; case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: case VK_ACCESS_TRANSFER_READ_BIT: case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | - RADV_CMD_FLAG_INV_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE | + RADV_CMD_FLAG_INV_L2; break; case VK_ACCESS_SHADER_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE; + /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to + * invalidate the scalar cache. 
*/ + if (cmd_buffer->device->physical_device->use_aco && + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) + flush_bits |= RADV_CMD_FLAG_INV_SCACHE; if (!image_is_coherent) - flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_INV_L2; break; case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: if (flush_CB) @@ -2708,7 +2944,7 @@ radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer, { struct radv_cmd_state *state = &cmd_buffer->state; uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); - struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment; + struct radv_image_view *view = state->attachments[att_idx].iview; if (view->image->info.samples == 1) return NULL; @@ -2747,10 +2983,10 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf bool begin_subpass) { unsigned idx = att.attachment; - struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment; + struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview; struct radv_sample_locations_state *sample_locs; VkImageSubresourceRange range; - range.aspectMask = 0; + range.aspectMask = view->aspect_mask; range.baseMipLevel = view->base_mip; range.levelCount = 1; range.baseArrayLayer = view->base_layer; @@ -2773,12 +3009,49 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, begin_subpass); - radv_handle_image_transition(cmd_buffer, - view->image, - cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range, sample_locs); + /* Determine if the subpass uses separate depth/stencil layouts. */ + bool uses_separate_depth_stencil_layouts = false; + if ((cmd_buffer->state.attachments[idx].current_layout != + cmd_buffer->state.attachments[idx].current_stencil_layout) || + (att.layout != att.stencil_layout)) { + uses_separate_depth_stencil_layouts = true; + } + + /* For separate layouts, perform depth and stencil transitions + * separately. + */ + if (uses_separate_depth_stencil_layouts && + (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT))) { + /* Depth-only transitions. */ + range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.layout, att.in_render_loop, + 0, 0, &range, sample_locs); + + /* Stencil-only transitions. 
*/ + range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_stencil_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.stencil_layout, att.in_render_loop, + 0, 0, &range, sample_locs); + } else { + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.layout, att.in_render_loop, + 0, 0, &range, sample_locs); + } cmd_buffer->state.attachments[idx].current_layout = att.layout; + cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout; + cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop; } @@ -2801,7 +3074,6 @@ radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, vk_find_struct_const(info->pNext, RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); struct radv_cmd_state *state = &cmd_buffer->state; - struct radv_framebuffer *framebuffer = state->framebuffer; if (!sample_locs) { state->subpass_sample_locs = NULL; @@ -2812,8 +3084,7 @@ radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, const VkAttachmentSampleLocationsEXT *att_sample_locs = &sample_locs->pAttachmentInitialSampleLocations[i]; uint32_t att_idx = att_sample_locs->attachmentIndex; - struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; - struct radv_image *image = att->attachment->image; + struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image; assert(vk_format_is_depth_or_stencil(image->vk_format)); @@ -2881,6 +3152,13 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, const VkRenderPassBeginInfo *info) { struct radv_cmd_state *state = &cmd_buffer->state; + const struct VkRenderPassAttachmentBeginInfo *attachment_info = NULL; + + if (info) { + attachment_info = vk_find_struct_const(info->pNext, + RENDER_PASS_ATTACHMENT_BEGIN_INFO); + } + if (pass->attachment_count == 0) { state->attachments = NULL; @@ -2929,7 +3207,22 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].current_stencil_layout = att->stencil_initial_layout; state->attachments[i].sample_location.count = 0; + + struct radv_image_view *iview; + if (attachment_info && attachment_info->attachmentCount > i) { + iview = radv_image_view_from_handle(attachment_info->pAttachments[i]); + } else { + iview = state->framebuffer->attachments[i]; + } + + state->attachments[i].iview = iview; + if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { + radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview); + } else { + radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview); + } } return VK_SUCCESS; @@ -2948,7 +3241,7 @@ VkResult radv_AllocateCommandBuffers( for (i = 0; i < pAllocateInfo->commandBufferCount; i++) { - if (!list_empty(&pool->free_cmd_buffers)) { + if (!list_is_empty(&pool->free_cmd_buffers)) { struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link); list_del(&cmd_buffer->pool_link); @@ -3049,9 +3342,11 @@ VkResult radv_BeginCommandBuffer( struct radv_subpass *subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass]; - result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL); - if (result 
!= VK_SUCCESS) - return result; + if (cmd_buffer->state.framebuffer) { + result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL); + if (result != VK_SUCCESS) + return result; + } radv_cmd_buffer_set_subpass(cmd_buffer, subpass); } @@ -3109,6 +3404,36 @@ void radv_CmdBindVertexBuffers( cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER; } +static uint32_t +vk_to_index_type(VkIndexType type) +{ + switch (type) { + case VK_INDEX_TYPE_UINT8_EXT: + return V_028A7C_VGT_INDEX_8; + case VK_INDEX_TYPE_UINT16: + return V_028A7C_VGT_INDEX_16; + case VK_INDEX_TYPE_UINT32: + return V_028A7C_VGT_INDEX_32; + default: + unreachable("invalid index type"); + } +} + +static uint32_t +radv_get_vgt_index_size(uint32_t type) +{ + switch (type) { + case V_028A7C_VGT_INDEX_8: + return 1; + case V_028A7C_VGT_INDEX_16: + return 2; + case V_028A7C_VGT_INDEX_32: + return 4; + default: + unreachable("invalid index type"); + } +} + void radv_CmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, @@ -3127,12 +3452,12 @@ void radv_CmdBindIndexBuffer( cmd_buffer->state.index_buffer = index_buffer; cmd_buffer->state.index_offset = offset; - cmd_buffer->state.index_type = indexType; /* vk matches hw */ + cmd_buffer->state.index_type = vk_to_index_type(indexType); cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo); cmd_buffer->state.index_va += index_buffer->offset + offset; - int index_size_shift = cmd_buffer->state.index_type ? 2 : 1; - cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift; + int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType)); + cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo); } @@ -3181,7 +3506,13 @@ void radv_CmdBindDescriptorSets( for (unsigned i = 0; i < descriptorSetCount; ++i) { unsigned idx = i + firstSet; RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]); - radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx); + + /* If the set is already bound we only need to update the + * (potentially changed) dynamic offsets. */ + if (descriptors_state->sets[idx] != set || + !(descriptors_state->valid & (1u << idx))) { + radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx); + } for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) { unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start; @@ -3196,9 +3527,17 @@ void radv_CmdBindDescriptorSets( dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } + cmd_buffer->push_constant_stages |= set->layout->dynamic_shader_stages; } @@ -3293,7 +3632,7 @@ void radv_CmdPushDescriptorSetKHR( * because it is invalid, according to Vulkan spec. 
*/ for (int i = 0; i < descriptorWriteCount; i++) { - MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; + ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); } @@ -3352,13 +3691,20 @@ VkResult radv_EndCommandBuffer( if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) - cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2; /* Make sure to sync all pending active queries at the end of * command buffer. */ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; + /* Since NGG streamout uses GDS, we need to make GDS idle when + * we leave the IB, otherwise another process might overwrite + * it while our shaders are busy. + */ + if (cmd_buffer->gds_needed) + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH; + si_emit_cache_flush(cmd_buffer); } @@ -3393,9 +3739,10 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer) radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw); radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw); - cmd_buffer->compute_scratch_size_needed = - MAX2(cmd_buffer->compute_scratch_size_needed, - pipeline->max_waves * pipeline->scratch_bytes_per_wave); + cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed, + pipeline->scratch_bytes_per_wave); + cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted, + pipeline->max_waves); radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->shaders[MESA_SHADER_COMPUTE]->bo); @@ -3449,6 +3796,20 @@ void radv_CmdBindPipeline( /* Prefetch all pipeline shaders at first draw time. */ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS; + if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) && + cmd_buffer->state.emitted_pipeline && + radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) && + !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) { + /* Transitioning from NGG to legacy GS requires + * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted + * at the beginning of IBs when legacy GS ring pointers + * are set. 
+ */ + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH; + } + radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state); radv_bind_streamout_state(cmd_buffer, pipeline); @@ -3474,7 +3835,7 @@ void radv_CmdSetViewport( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount; + ASSERTED const uint32_t total_count = firstViewport + viewportCount; assert(firstViewport < MAX_VIEWPORTS); assert(total_count >= 1 && total_count <= MAX_VIEWPORTS); @@ -3498,7 +3859,7 @@ void radv_CmdSetScissor( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount; + ASSERTED const uint32_t total_count = firstScissor + scissorCount; assert(firstScissor < MAX_SCISSORS); assert(total_count >= 1 && total_count <= MAX_SCISSORS); @@ -3660,7 +4021,7 @@ void radv_CmdSetDiscardRectangleEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; + ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES); assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES); @@ -3710,10 +4071,14 @@ void radv_CmdExecuteCommands( for (uint32_t i = 0; i < commandBufferCount; i++) { RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]); - primary->scratch_size_needed = MAX2(primary->scratch_size_needed, - secondary->scratch_size_needed); - primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed, - secondary->compute_scratch_size_needed); + primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed, + secondary->scratch_size_per_wave_needed); + primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted, + secondary->scratch_waves_wanted); + primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed, + secondary->compute_scratch_size_per_wave_needed); + primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted, + secondary->compute_scratch_waves_wanted); if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed) primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed; @@ -3883,7 +4248,7 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, struct radv_cmd_state *state = &cmd_buffer->state; struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); @@ -3923,7 +4288,8 @@ radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) continue; VkImageLayout layout = state->pass->attachments[a].final_layout; - struct radv_subpass_attachment att = { a, layout }; + VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout; + struct radv_subpass_attachment att = { a, layout, stencil_layout }; radv_handle_subpass_image_transition(cmd_buffer, att, false); } } @@ -3953,10 +4319,10 @@ void radv_CmdBeginRenderPass( radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } -void radv_CmdBeginRenderPass2KHR( +void 
radv_CmdBeginRenderPass2( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBeginInfo, - const VkSubpassBeginInfoKHR* pSubpassBeginInfo) + const VkSubpassBeginInfo* pSubpassBeginInfo) { radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo, pSubpassBeginInfo->contents); @@ -3973,10 +4339,10 @@ void radv_CmdNextSubpass( radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1); } -void radv_CmdNextSubpass2KHR( +void radv_CmdNextSubpass2( VkCommandBuffer commandBuffer, - const VkSubpassBeginInfoKHR* pSubpassBeginInfo, - const VkSubpassEndInfoKHR* pSubpassEndInfo) + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo) { radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents); } @@ -3995,7 +4361,7 @@ static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned in radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX]; if (loc->sgpr_idx != -1) { uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -4038,7 +4404,7 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer, struct radeon_cmdbuf *cs = cmd_buffer->cs; unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX; - bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id; + bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id; uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr; bool predicating = cmd_buffer->state.predicating; assert(base_reg); @@ -4143,9 +4509,15 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, } if (info->indexed) { - int index_size = state->index_type ? 4 : 2; + int index_size = radv_get_vgt_index_size(state->index_type); uint64_t index_va; + /* Skip draw calls with 0-sized index buffers. They + * cause a hang on some chips, like Navi10-14. + */ + if (!cmd_buffer->state.max_index_count) + return; + index_va = state->index_va; index_va += info->first_index * index_size; @@ -4203,7 +4575,7 @@ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, { struct radv_cmd_state *state = &cmd_buffer->state; - if (!cmd_buffer->device->physical_device->has_scissor_bug) + if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug) return false; if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer) @@ -4222,8 +4594,11 @@ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & used_states) return true; + uint32_t primitive_reset_index = + radv_get_primitive_reset_index(cmd_buffer); + if (info->indexed && state->pipeline->graphics.prim_restart_enable && - (state->index_type ? 
0xffffffffu : 0xffffu) != state->last_primitive_reset_index) + primitive_reset_index != state->last_primitive_reset_index) return true; return false; @@ -4285,7 +4660,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; - MAYBE_UNUSED unsigned cdw_max = + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); @@ -4453,7 +4828,7 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountKHR( +void radv_CmdDrawIndirectCount( VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -4477,7 +4852,7 @@ void radv_CmdDrawIndirectCountKHR( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndexedIndirectCountKHR( +void radv_CmdDrawIndexedIndirectCount( VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -4540,7 +4915,12 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25); + ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25); + + if (compute_shader->info.wave_size == 32) { + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + dispatch_initiator |= S_00B800_CS_W32_EN(1); + } if (info->indirect) { uint64_t va = radv_buffer_get_va(info->indirect->bo); @@ -4801,9 +5181,9 @@ void radv_CmdEndRenderPass( cmd_buffer->state.subpass_sample_locs = NULL; } -void radv_CmdEndRenderPass2KHR( +void radv_CmdEndRenderPass2( VkCommandBuffer commandBuffer, - const VkSubpassEndInfoKHR* pSubpassEndInfo) + const VkSubpassEndInfo* pSubpassEndInfo) { radv_CmdEndRenderPass(commandBuffer); } @@ -4817,26 +5197,26 @@ void radv_CmdEndRenderPass2KHR( */ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, - const VkImageSubresourceRange *range, - uint32_t clear_word) + const VkImageSubresourceRange *range) { assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; VkClearDepthStencilValue value = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word); + state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; if (vk_format_is_stencil(image->vk_format)) aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; - radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects); + radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects); if (radv_image_is_tc_compat_htile(image)) { /* Initialize the TC-compat metada value to 0 because by @@ -4844,14 +5224,16 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, * need have to conditionally update its value when performing * a fast depth clear. 
*/ - radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0); + radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0); } } static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range, @@ -4861,30 +5243,17 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe return; if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { - uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; - - if (radv_layout_is_htile_compressed(image, dst_layout, - dst_queue_mask)) { - clear_value = 0; - } - - radv_initialize_htile(cmd_buffer, image, range, clear_value); - } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && - radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { - uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; - radv_initialize_htile(cmd_buffer, image, range, clear_value); - } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && - !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { - VkImageSubresourceRange local_range = *range; - local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; - local_range.baseMipLevel = 0; - local_range.levelCount = 1; - + radv_initialize_htile(cmd_buffer, image, range); + } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) && + radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) { + radv_initialize_htile(cmd_buffer, image, range); + } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) { cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, - &local_range, sample_locs); + radv_decompress_depth_image_inplace(cmd_buffer, image, range, + sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4892,20 +5261,23 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe } static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, + uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image) + struct radv_image *image, + const VkImageSubresourceRange *range) { struct radv_cmd_state *state = &cmd_buffer->state; static const uint32_t fmask_clear_values[4] = { @@ -4920,7 +5292,7 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_fmask(cmd_buffer, image, 
range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } @@ -4930,56 +5302,38 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, const VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; - uint32_t level_count = radv_get_levelCount(image, range); unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - /* Mipmap level aren't implemented. */ - assert(level_count == 1); - state->flush_bits |= radv_clear_dcc(cmd_buffer, image, - range, value); - } else { - /* Initialize the mipmap levels with DCC first. */ - for (unsigned l = 0; l < level_count; l++) { - uint32_t level = range->baseMipLevel + l; - struct legacy_surf_level *surf_level = - &image->planes[0].surface.u.legacy.level[level]; - - if (!surf_level->dcc_fast_clear_size) - break; - - state->flush_bits |= - radv_dcc_clear_level(cmd_buffer, image, - level, value); - } + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value); + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) { /* When DCC is enabled with mipmaps, some levels might not * support fast clears and we have to initialize them as "fully * expanded". */ - if (image->planes[0].surface.num_dcc_levels > 1) { - /* Compute the size of all fast clearable DCC levels. */ - for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { - struct legacy_surf_level *surf_level = - &image->planes[0].surface.u.legacy.level[i]; + /* Compute the size of all fast clearable DCC levels. */ + for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[i]; + unsigned dcc_fast_clear_size = + surf_level->dcc_slice_fast_clear_size * image->info.array_size; - if (!surf_level->dcc_fast_clear_size) - break; + if (!dcc_fast_clear_size) + break; - size = surf_level->dcc_offset + surf_level->dcc_fast_clear_size; - } + size = surf_level->dcc_offset + dcc_fast_clear_size; + } - /* Initialize the mipmap levels without DCC. */ - if (size != image->planes[0].surface.dcc_size) { - state->flush_bits |= - radv_fill_buffer(cmd_buffer, image->bo, - image->offset + image->dcc_offset + size, - image->planes[0].surface.dcc_size - size, - 0xffffffff); - } + /* Initialize the mipmap levels without DCC. */ + if (size != image->planes[0].surface.dcc_size) { + state->flush_bits |= + radv_fill_buffer(cmd_buffer, image->bo, + image->offset + image->dcc_offset + size, + image->planes[0].surface.dcc_size - size, + 0xffffffff); } } @@ -4993,7 +5347,9 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range) @@ -5006,18 +5362,19 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, value = 0xccccccccu; } - radv_initialise_cmask(cmd_buffer, image, value); + radv_initialise_cmask(cmd_buffer, image, range, value); } if (radv_image_has_fmask(image)) { - radv_initialize_fmask(cmd_buffer, image); + radv_initialize_fmask(cmd_buffer, image, range); } if (radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. 
*/ bool need_decompress_pass = false; - if (radv_layout_dcc_compressed(image, dst_layout, + if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, + dst_render_loop, dst_queue_mask)) { value = 0x20202020u; need_decompress_pass = true; @@ -5043,14 +5400,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range) { if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range); return; @@ -5059,18 +5419,18 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); - } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && - !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { + } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); - } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && - !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { + } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) { radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { bool fce_eliminate = false, fmask_expand = false; - if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && - !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { + if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) { fce_eliminate = true; } @@ -5095,7 +5455,9 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, @@ -5109,7 +5471,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, assert(src_family == cmd_buffer->queue_family_index || dst_family == cmd_buffer->queue_family_index); - if (src_family == VK_QUEUE_FAMILY_EXTERNAL) + if (src_family == VK_QUEUE_FAMILY_EXTERNAL || + src_family == VK_QUEUE_FAMILY_FOREIGN_EXT) return; if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) @@ -5133,12 +5496,14 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, if (vk_format_is_depth(image->vk_format)) { radv_handle_depth_image_transition(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range, sample_locs); } 
else { radv_handle_color_image_transition(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range); } @@ -5171,7 +5536,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff); assert(cmd_buffer->cs->cdw <= cdw_max); @@ -5235,7 +5600,9 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, + false, /* Outside of a renderpass we are never in a renderloop */ pImageMemoryBarriers[i].newLayout, + false, /* Outside of a renderpass we are never in a renderloop */ pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, &pImageMemoryBarriers[i].subresourceRange, @@ -5290,7 +5657,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); /* Flags that only require a top-of-pipe event. */ VkPipelineStageFlags top_of_pipe_flags = @@ -5335,6 +5702,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->device->physical_device->rad_info.chip_class, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, va, value, cmd_buffer->gfx9_eop_bug_va); } @@ -5546,9 +5914,13 @@ radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable) (so->enabled_mask << 8) | (so->enabled_mask << 12); - if ((old_streamout_enabled != so->streamout_enabled) || - (old_hw_enabled_mask != so->hw_enabled_mask)) + if (!cmd_buffer->device->physical_device->use_ngg_streamout && + ((old_streamout_enabled != so->streamout_enabled) || + (old_hw_enabled_mask != so->hw_enabled_mask))) radv_emit_streamout_enable(cmd_buffer); + + if (cmd_buffer->device->physical_device->use_ngg_streamout) + cmd_buffer->gds_needed = true; } static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) @@ -5577,14 +5949,14 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) radeon_emit(cs, 4); /* poll interval */ } -void radv_CmdBeginTransformFeedbackEXT( - VkCommandBuffer commandBuffer, - uint32_t firstCounterBuffer, - uint32_t counterBufferCount, - const VkBuffer* pCounterBuffers, - const VkDeviceSize* pCounterBufferOffsets) +static void +radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) + { - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; @@ -5642,7 +6014,63 @@ void radv_CmdBeginTransformFeedbackEXT( radv_set_streamout_enable(cmd_buffer, true); } -void radv_CmdEndTransformFeedbackEXT( +static void +gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize 
*pCounterBufferOffsets) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + unsigned last_target = util_last_bit(so->enabled_mask) - 1; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + + /* Sync because the next streamout operation will overwrite GDS and we + * have to make sure it's idle. + * TODO: Improve by tracking if there is a streamout operation in + * flight. + */ + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH; + si_emit_cache_flush(cmd_buffer); + + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + bool append = counter_buffer_idx >= 0 && + pCounterBuffers && pCounterBuffers[counter_buffer_idx]; + uint64_t va = 0; + + if (append) { + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + + va += radv_buffer_get_va(buffer->bo); + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } + + radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0)); + radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) | + S_411_DST_SEL(V_411_GDS) | + S_411_CP_SYNC(i == last_target)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, 4 * i); /* destination in GDS */ + radeon_emit(cs, 0); + radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) | + S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target)); + } + + radv_set_streamout_enable(cmd_buffer, true); +} + +void radv_CmdBeginTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, @@ -5650,6 +6078,25 @@ void radv_CmdEndTransformFeedbackEXT( const VkDeviceSize* pCounterBufferOffsets) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + if (cmd_buffer->device->physical_device->use_ngg_streamout) { + gfx10_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } else { + radv_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } +} + +static void +radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; uint32_t i; @@ -5695,6 +6142,67 @@ void radv_CmdEndTransformFeedbackEXT( radv_set_streamout_enable(cmd_buffer, false); } +static void +gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + if (counter_buffer_idx >= 0 && pCounterBuffers && 
pCounterBuffers[counter_buffer_idx]) { + /* The array of counters buffer is optional. */ + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + uint64_t va = radv_buffer_get_va(buffer->bo); + + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + si_cs_emit_write_event_eop(cs, + cmd_buffer->device->physical_device->rad_info.chip_class, + radv_cmd_buffer_uses_mec(cmd_buffer), + V_028A90_PS_DONE, 0, + EOP_DST_SEL_TC_L2, + EOP_DATA_SEL_GDS, + va, EOP_DATA_GDS(i, 1), 0); + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } + } + + radv_set_streamout_enable(cmd_buffer, false); +} + +void radv_CmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + if (cmd_buffer->device->physical_device->use_ngg_streamout) { + gfx10_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } else { + radv_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } +} + void radv_CmdDrawIndirectByteCountEXT( VkCommandBuffer commandBuffer, uint32_t instanceCount, @@ -5732,6 +6240,8 @@ void radv_CmdWriteBufferMarkerAMD( si_emit_cache_flush(cmd_buffer); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12); + if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) { radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | @@ -5746,8 +6256,11 @@ void radv_CmdWriteBufferMarkerAMD( cmd_buffer->device->physical_device->rad_info.chip_class, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, va, marker, cmd_buffer->gfx9_eop_bug_va); } + + assert(cmd_buffer->cs->cdw <= cdw_max); }
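
Note on the scratch accounting change in this diff: the single scratch_size_needed / compute_scratch_size_needed totals are replaced by a per-wave size plus a wanted wave count, each merged independently with MAX2 across bound pipelines and executed secondary command buffers. Below is a minimal sketch of how the two tracked values could be combined into one allocation size at submit time; the helper name and the wave-count limit are assumptions for illustration only, not the actual radv queue code.

	#include <stdint.h>

	/* Illustrative sketch only: combine the per-wave scratch size and the
	 * wanted wave count into a buffer size. MAX_ILLUSTRATIVE_WAVES stands
	 * in for whatever per-chip wave limit the driver really applies. */
	#define MAX_ILLUSTRATIVE_WAVES 32

	static uint64_t
	sketch_scratch_bo_size(uint64_t size_per_wave, uint32_t waves_wanted)
	{
		uint32_t waves = waves_wanted < MAX_ILLUSTRATIVE_WAVES ?
				 waves_wanted : MAX_ILLUSTRATIVE_WAVES;

		/* Tracking the two dimensions separately means two command
		 * buffers merge as MAX(per-wave sizes) * MAX(wave counts),
		 * which is always at least as large as either buffer's own
		 * requirement. */
		return size_per_wave * (uint64_t)waves;
	}

Merging the per-wave size and the wave count separately can over-reserve compared with tracking the product directly, but it can never under-reserve, which is what matters for correctness when secondary command buffers are folded into a primary one.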
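For context on the split of radv_CmdBeginTransformFeedbackEXT / radv_CmdEndTransformFeedbackEXT into a legacy path and a GFX10 NGG/GDS path, the application-side sequence that reaches these entry points is the standard VK_EXT_transform_feedback recording flow. A hypothetical sketch follows; cmd_buf, xfb_buffer and counter_buffer are assumed to be valid handles created elsewhere.

	/* Hypothetical VK_EXT_transform_feedback recording sequence. */
	VkDeviceSize xfb_offset = 0, xfb_size = VK_WHOLE_SIZE;
	VkDeviceSize counter_offset = 0;

	vkCmdBindTransformFeedbackBuffersEXT(cmd_buf, 0, 1, &xfb_buffer,
					     &xfb_offset, &xfb_size);

	/* Passing a counter buffer makes the driver resume from the
	 * previously written byte count (the "append" case handled in the
	 * emit functions above). */
	vkCmdBeginTransformFeedbackEXT(cmd_buf, 0, 1, &counter_buffer,
				       &counter_offset);

	vkCmdDraw(cmd_buf, 3, 1, 0, 0);

	/* On the GFX10 path this writes the GDS counters back to the counter
	 * buffer with an EOP event; the legacy path reads the hardware
	 * streamout offsets back instead. */
	vkCmdEndTransformFeedbackEXT(cmd_buf, 0, 1, &counter_buffer,
				     &counter_offset);

The counter buffer is what lets radv_CmdDrawIndirectByteCountEXT later source its vertex count from the bytes written by a previous streamout pass.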