diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index b570bda35ae..a833fc4221b 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -31,6 +31,7 @@
 #include "radv_cs.h"
 #include "sid.h"
 #include "vk_format.h"
+#include "vk_util.h"
 
 #include "radv_debug.h"
 #include "radv_meta.h"
 
@@ -56,7 +57,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
 					 VkImageLayout dst_layout,
 					 uint32_t src_family,
 					 uint32_t dst_family,
-					 const VkImageSubresourceRange *range);
+					 const VkImageSubresourceRange *range,
+					 struct radv_sample_locations_state *sample_locs);
 
 const struct radv_dynamic_state default_dynamic_state = {
 	.viewport = {
@@ -362,12 +364,14 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
 			radv_buffer_get_va(cmd_buffer->upload.upload_bo);
 		cmd_buffer->gfx9_fence_va += fence_offset;
 
-		/* Allocate a buffer for the EOP bug on GFX9. */
-		radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
-					     &eop_bug_offset, &fence_ptr);
-		cmd_buffer->gfx9_eop_bug_va =
-			radv_buffer_get_va(cmd_buffer->upload.upload_bo);
-		cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
+		if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
+			/* Allocate a buffer for the EOP bug on GFX9. */
+			radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
+						     &eop_bug_offset, &fence_ptr);
+			cmd_buffer->gfx9_eop_bug_va =
+				radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+			cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
+		}
 	}
 
 	cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
@@ -582,8 +586,8 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
 	for_each_bit(i, descriptors_state->valid) {
 		struct radv_descriptor_set *set = descriptors_state->sets[i];
 
-		data[i * 2] = (uintptr_t)set;
-		data[i * 2 + 1] = (uintptr_t)set >> 32;
+		data[i * 2] = (uint64_t)(uintptr_t)set;
+		data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32;
 	}
 
 	radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
@@ -880,6 +884,47 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
 	cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
 
+static void
+radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
+			  struct radv_pipeline *pipeline)
+{
+	const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
+
+
+	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
+		return;
+
+	if (old_pipeline &&
+	    old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
+	    old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
+		return;
+
+	bool binning_flush = false;
+	if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
+	    cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
+	    cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
+	    cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+		binning_flush = !old_pipeline ||
+			G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
+			G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
+	}
+
+	radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
pipeline->graphics.binning.pa_sc_binner_cntl_0 | + S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush)); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); + } else { + radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); + } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + + static void radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer, struct radv_shader_variant *shader) @@ -927,7 +972,8 @@ radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer, if (mask & RADV_PREFETCH_GS) { radv_emit_shader_prefetch(cmd_buffer, pipeline->shaders[MESA_SHADER_GEOMETRY]); - radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); + if (radv_pipeline_has_gs_copy_shader(pipeline)) + radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); } if (mask & RADV_PREFETCH_PS) @@ -1092,6 +1138,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) return; radv_update_multisample_state(cmd_buffer, pipeline); + radv_update_binning_state(cmd_buffer, pipeline); cmd_buffer->scratch_size_needed = MAX2(cmd_buffer->scratch_size_needed, @@ -1121,7 +1168,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) pipeline->shaders[i]->bo); } - if (radv_pipeline_has_gs(pipeline)) + if (radv_pipeline_has_gs_copy_shader(pipeline)) radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->gs_copy_shader->bo); @@ -1238,12 +1285,13 @@ static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, struct radv_attachment_info *att, - struct radv_image *image, + struct radv_image_view *iview, VkImageLayout layout) { bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; + struct radv_image *image = iview->image; if (!radv_layout_dcc_compressed(image, layout, radv_image_queue_family_mask(image, @@ -1252,7 +1300,45 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_DCC_ENABLE; } - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (radv_image_is_tc_compat_cmask(image) && + (radv_is_fmask_decompress_pipeline(cmd_buffer) || + radv_is_dcc_decompress_pipeline(cmd_buffer))) { + /* If this bit is set, the FMASK decompression operation + * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS). 
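+		 * Clear the field here, while one of the decompression
+		 * pipelines is bound, so that the decompression pass can
+		 * actually take effect.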
+ */ + cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; + } + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); + radeon_emit(cmd_buffer->cs, cb->cb_color_base); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_view); + radeon_emit(cmd_buffer->cs, cb_color_info); + radeon_emit(cmd_buffer->cs, cb->cb_color_attrib); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_control); + radeon_emit(cmd_buffer->cs, cb->cb_color_cmask); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_fmask); + radeon_emit(cmd_buffer->cs, 0); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_base); + + radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4, + cb->cb_color_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4, + cb->cb_color_cmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4, + cb->cb_color_fmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4, + cb->cb_dcc_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4, + cb->cb_color_attrib2); + radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4, + cb->cb_color_attrib3); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32)); @@ -1291,9 +1377,17 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, } } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, iview->base_mip)) { /* Drawing with DCC enabled also compresses colorbuffers. 
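+		 * Update the DCC predicate to reflect that, for the
+		 * subresource range bound through this image view.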
*/ - radv_update_dcc_metadata(cmd_buffer, image, true); + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + radv_update_dcc_metadata(cmd_buffer, image, &range, true); } } @@ -1306,7 +1400,8 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, uint32_t db_z_info = ds->db_z_info; uint32_t db_z_info_reg; - if (!radv_image_is_tc_compat_htile(image)) + if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug || + !radv_image_is_tc_compat_htile(image)) return; if (!radv_layout_has_htile(image, layout, @@ -1318,7 +1413,7 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, db_z_info &= C_028040_ZRANGE_PRECISION; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { db_z_info_reg = R_028038_DB_Z_INFO; } else { db_z_info_reg = R_028040_DB_Z_INFO; @@ -1362,8 +1457,26 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view); radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface); - - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base); + radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7); + radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1)); + radeon_emit(cmd_buffer->cs, db_z_info); + radeon_emit(cmd_buffer->cs, db_stencil_info); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3); radeon_emit(cmd_buffer->cs, ds->db_htile_data_base); radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32)); @@ -1498,6 +1611,10 @@ radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, { struct radeon_cmdbuf *cs = cmd_buffer->cs; uint64_t va = radv_buffer_get_va(image->bo); + + if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug) + return; + va += image->offset + image->tc_compat_zrange_offset; radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); @@ -1514,8 +1631,6 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkClearDepthStencilValue ds_clear_value) { - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; uint32_t cond_val; /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last @@ -1605,22 +1720,27 @@ 
radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->fce_pred_offset; + uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1628,22 +1748,27 @@ radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->dcc_pred_offset; + uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1685,23 +1810,28 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, static void radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t color_values[2]) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, cmd_buffer->state.predicating)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, color_values[0]); - radeon_emit(cs, color_values[1]); + + for (uint32_t l = 0; l < level_count; l++) { + 
radeon_emit(cs, color_values[0]); + radeon_emit(cs, color_values[1]); + } } /** @@ -1709,13 +1839,23 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, int cb_idx, uint32_t color_values[2]) { - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + struct radv_image *image = iview->image; + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, iview->base_mip)); - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values); radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx, color_values); @@ -1726,15 +1866,15 @@ radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + struct radv_image_view *iview, int cb_idx) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + struct radv_image *image = iview->image; + uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip); - if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image)) + if (!radv_image_has_cmask(image) && + !radv_dcc_enabled(image, iview->base_mip)) return; uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; @@ -1766,7 +1906,6 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int i; struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; - unsigned num_bpp64_colorbufs = 0; /* this may happen for inherited secondary recording */ if (!framebuffer) @@ -1781,19 +1920,16 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int idx = subpass->color_attachments[i].attachment; struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; + struct radv_image_view *iview = att->attachment; VkImageLayout layout = subpass->color_attachments[i].layout; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); - radv_emit_fb_color_state(cmd_buffer, i, att, image, layout); + radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); - radv_load_color_clear_metadata(cmd_buffer, image, i); - - if (image->planes[0].surface.bpe >= 8) - num_bpp64_colorbufs++; + radv_load_color_clear_metadata(cmd_buffer, iview, i); } if (subpass->depth_stencil_attachment) { @@ -1802,7 +1938,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) struct radv_attachment_info *att = &framebuffer->attachments[idx]; struct radv_image *image = att->attachment->image; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image, + ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index); /* We currently don't support writing 
decompressed HTILE */ @@ -1817,7 +1953,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) } radv_load_ds_clear_metadata(cmd_buffer, image); } else { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2); else radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2); @@ -1830,23 +1966,19 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_Y(framebuffer->height)); if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { - uint8_t watermark = 4; /* Default value for GFX8. */ - - /* For optimal DCC performance. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - if (num_bpp64_colorbufs >= 5) { - watermark = 8; - } else { - watermark = 6; - } - } + bool disable_constant_encode = + cmd_buffer->device->physical_device->has_dcc_constant_encode; + enum chip_class chip_class = + cmd_buffer->device->physical_device->rad_info.chip_class; + uint8_t watermark = chip_class >= GFX10 ? 6 : 4; radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL, - S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) | - S_028424_OVERWRITE_COMBINER_WATERMARK(watermark)); + S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) | + S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) | + S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode)); } - if (cmd_buffer->device->dfsm_allowed) { + if (cmd_buffer->device->pbb_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); } @@ -1862,7 +1994,8 @@ radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer) if (state->index_type != state->last_index_type) { if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE, + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_03090C_VGT_INDEX_TYPE, 2, state->index_type); } else { radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); @@ -1909,10 +2042,12 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) } else { const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? 
util_logbase2(subpass->max_sample_count) : 0; + bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries; if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | + S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) | S_028004_SAMPLE_RATE(sample_rate) | S_028004_ZPASS_ENABLE(1) | S_028004_SLICE_EVEN_ENABLE(1) | @@ -2079,7 +2214,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, if (flush_indirect_descriptors) radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, MAX_SETS * MESA_SHADER_STAGES * 4); @@ -2165,7 +2300,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); va += offset; - MAYBE_UNUSED unsigned cdw_max = + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, MESA_SHADER_STAGES * 4); @@ -2229,9 +2364,16 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) | + S_008F0C_OOB_SELECT(1) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2269,7 +2411,7 @@ radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va) base_reg + loc->sgpr_idx * 4, va, false); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS]; if (loc->sgpr_idx != -1) { base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -2319,8 +2461,15 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(3) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2392,26 +2541,42 @@ struct radv_draw_info { uint64_t strmout_buffer_offset; }; +static uint32_t +radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer) +{ + switch (cmd_buffer->state.index_type) { + case V_028A7C_VGT_INDEX_8: + return 0xffu; + case V_028A7C_VGT_INDEX_16: + return 0xffffu; + case V_028A7C_VGT_INDEX_32: + return 0xffffffffu; + default: + unreachable("invalid index type"); + } +} + static void -radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, - const struct radv_draw_info *draw_info) 
+si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, + bool instanced_draw, bool indirect_draw, + bool count_from_stream_output, + uint32_t draw_vertex_count) { struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; struct radv_cmd_state *state = &cmd_buffer->state; struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint32_t ia_multi_vgt_param; - int32_t primitive_reset_en; + unsigned ia_multi_vgt_param; - /* Draw state. */ ia_multi_vgt_param = - si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, - draw_info->indirect, - !!draw_info->strmout_buffer, - draw_info->indirect ? 0 : draw_info->count); + si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, + indirect_draw, + count_from_stream_output, + draw_vertex_count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { - if (info->chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, + if (info->chip_class == GFX9) { + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); } else if (info->chip_class >= GFX7) { @@ -2424,6 +2589,24 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, } state->last_ia_multi_vgt_param = ia_multi_vgt_param; } +} + +static void +radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, + const struct radv_draw_info *draw_info) +{ + struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; + struct radv_cmd_state *state = &cmd_buffer->state; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + int32_t primitive_reset_en; + + /* Draw state. */ + if (info->chip_class < GFX10) { + si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, + draw_info->indirect, + !!draw_info->strmout_buffer, + draw_info->indirect ? 0 : draw_info->count); + } /* Primitive restart. */ primitive_reset_en = @@ -2444,7 +2627,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, if (primitive_reset_en) { uint32_t primitive_reset_index = - state->index_type ? 
0xffffffffu : 0xffffu;
+			radv_get_primitive_reset_index(cmd_buffer);
 
 		if (primitive_reset_index != state->last_primitive_reset_index) {
 			radeon_set_context_reg(cs,
@@ -2527,7 +2710,7 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
 	case VK_ACCESS_SHADER_WRITE_BIT:
 	case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
 	case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
-		flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+		flush_bits |= RADV_CMD_FLAG_WB_L2;
 		break;
 	case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
 		flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
@@ -2542,7 +2725,7 @@
 	case VK_ACCESS_TRANSFER_WRITE_BIT:
 		flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
 			      RADV_CMD_FLAG_FLUSH_AND_INV_DB |
-			      RADV_CMD_FLAG_INV_GLOBAL_L2;
+			      RADV_CMD_FLAG_INV_L2;
 
 		if (flush_CB_meta)
 			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
@@ -2578,7 +2761,9 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
 	if (!radv_image_has_htile(image))
 		flush_DB_meta = false;
 
-	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+	/* TODO: implement shader coherent for GFX10 */
+
+	if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
 		if (image->info.samples == 1 &&
 		    (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
 				     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
@@ -2599,19 +2784,19 @@
 	case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
 		break;
 	case VK_ACCESS_UNIFORM_READ_BIT:
-		flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
+		flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE;
 		break;
 	case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
 	case VK_ACCESS_TRANSFER_READ_BIT:
 	case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
-		flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
-			      RADV_CMD_FLAG_INV_GLOBAL_L2;
+		flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
+			      RADV_CMD_FLAG_INV_L2;
 		break;
 	case VK_ACCESS_SHADER_READ_BIT:
-		flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
+		flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
 
 		if (!image_is_coherent)
-			flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
+			flush_bits |= RADV_CMD_FLAG_INV_L2;
 		break;
 	case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
 		if (flush_CB)
@@ -2642,11 +2827,67 @@ void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
 			       NULL);
 }
 
+uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+	/* The id of this subpass shouldn't exceed the number of subpasses in
+	 * this render pass minus 1.
+	 */
+	assert(subpass_id < state->pass->subpass_count);
+	return subpass_id;
+}
+
+static struct radv_sample_locations_state *
+radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
+				     uint32_t att_idx,
+				     bool begin_subpass)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+	struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment;
+
+	if (view->image->info.samples == 1)
+		return NULL;
+
+	if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
+		/* Return the initial sample locations if this is the initial
+		 * layout transition of the given subpass attachment.
+		 */
+		if (state->attachments[att_idx].sample_location.count > 0)
+			return &state->attachments[att_idx].sample_location;
+	} else {
+		/* Otherwise return the subpass sample locations if defined.
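+		 * These were provided via
+		 * VkRenderPassSampleLocationsBeginInfoEXT when the render
+		 * pass was begun.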
*/ + if (state->subpass_sample_locs) { + /* Because the driver sets the current subpass before + * initial layout transitions, we should use the sample + * locations from the previous subpass to avoid an + * off-by-one problem. Otherwise, use the sample + * locations for the current subpass for final layout + * transitions. + */ + if (begin_subpass) + subpass_id--; + + for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) { + if (state->subpass_sample_locs[i].subpass_idx == subpass_id) + return &state->subpass_sample_locs[i].sample_location; + } + } + } + + return NULL; +} + static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer, - struct radv_subpass_attachment att) + struct radv_subpass_attachment att, + bool begin_subpass) { unsigned idx = att.attachment; struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment; + struct radv_sample_locations_state *sample_locs; VkImageSubresourceRange range; range.aspectMask = 0; range.baseMipLevel = view->base_mip; @@ -2665,10 +2906,16 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask); } + /* Get the subpass sample locations for the given attachment, if NULL + * is returned the driver will use the default HW locations. + */ + sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, + begin_subpass); + radv_handle_image_transition(cmd_buffer, view->image, cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range); + att.layout, 0, 0, &range, sample_locs); cmd_buffer->state.attachments[idx].current_layout = att.layout; @@ -2684,6 +2931,89 @@ radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; } +static VkResult +radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, + struct radv_render_pass *pass, + const VkRenderPassBeginInfo *info) +{ + const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs = + vk_find_struct_const(info->pNext, + RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_framebuffer *framebuffer = state->framebuffer; + + if (!sample_locs) { + state->subpass_sample_locs = NULL; + return VK_SUCCESS; + } + + for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) { + const VkAttachmentSampleLocationsEXT *att_sample_locs = + &sample_locs->pAttachmentInitialSampleLocations[i]; + uint32_t att_idx = att_sample_locs->attachmentIndex; + struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; + struct radv_image *image = att->attachment->image; + + assert(vk_format_is_depth_or_stencil(image->vk_format)); + + /* From the Vulkan spec 1.1.108: + * + * "If the image referenced by the framebuffer attachment at + * index attachmentIndex was not created with + * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT + * then the values specified in sampleLocationsInfo are + * ignored." 
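+		 *
+		 * So skip attachments that were created without the
+		 * compatibility flag.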
+ */ + if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) + continue; + + const VkSampleLocationsInfoEXT *sample_locs_info = + &att_sample_locs->sampleLocationsInfo; + + state->attachments[att_idx].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->attachments[att_idx].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->attachments[att_idx].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->attachments[att_idx].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc, + sample_locs->postSubpassSampleLocationsCount * + sizeof(state->subpass_sample_locs[0]), + 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (state->subpass_sample_locs == NULL) { + cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY; + return cmd_buffer->record_result; + } + + state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount; + + for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) { + const VkSubpassSampleLocationsEXT *subpass_sample_locs_info = + &sample_locs->pPostSubpassSampleLocations[i]; + const VkSampleLocationsInfoEXT *sample_locs_info = + &subpass_sample_locs_info->sampleLocationsInfo; + + state->subpass_sample_locs[i].subpass_idx = + subpass_sample_locs_info->subpassIndex; + state->subpass_sample_locs[i].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->subpass_sample_locs[i].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->subpass_sample_locs[i].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + return VK_SUCCESS; +} + static VkResult radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, struct radv_render_pass *pass, @@ -2738,6 +3068,7 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].sample_location.count = 0; } return VK_SUCCESS; @@ -2917,6 +3248,36 @@ void radv_CmdBindVertexBuffers( cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER; } +static uint32_t +vk_to_index_type(VkIndexType type) +{ + switch (type) { + case VK_INDEX_TYPE_UINT8_EXT: + return V_028A7C_VGT_INDEX_8; + case VK_INDEX_TYPE_UINT16: + return V_028A7C_VGT_INDEX_16; + case VK_INDEX_TYPE_UINT32: + return V_028A7C_VGT_INDEX_32; + default: + unreachable("invalid index type"); + } +} + +static uint32_t +radv_get_vgt_index_size(uint32_t type) +{ + switch (type) { + case V_028A7C_VGT_INDEX_8: + return 1; + case V_028A7C_VGT_INDEX_16: + return 2; + case V_028A7C_VGT_INDEX_32: + return 4; + default: + unreachable("invalid index type"); + } +} + void radv_CmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, @@ -2935,12 +3296,12 @@ void radv_CmdBindIndexBuffer( cmd_buffer->state.index_buffer = index_buffer; cmd_buffer->state.index_offset = offset; - cmd_buffer->state.index_type = indexType; /* vk matches hw */ + cmd_buffer->state.index_type = vk_to_index_type(indexType); cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo); cmd_buffer->state.index_va += index_buffer->offset + offset; - int index_size_shift = cmd_buffer->state.index_type ? 
2 : 1; - cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift; + int index_size = radv_get_vgt_index_size(indexType); + cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo); } @@ -3004,9 +3365,17 @@ void radv_CmdBindDescriptorSets( dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(3) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } + cmd_buffer->push_constant_stages |= set->layout->dynamic_shader_stages; } @@ -3097,6 +3466,14 @@ void radv_CmdPushDescriptorSetKHR( pipelineBindPoint)) return; + /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR() + * because it is invalid, according to Vulkan spec. + */ + for (int i = 0; i < descriptorWriteCount; i++) { + ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; + assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); + } + radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer, radv_descriptor_set_to_handle(push_set), descriptorWriteCount, pDescriptorWrites, 0, NULL); @@ -3152,7 +3529,7 @@ VkResult radv_EndCommandBuffer( if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) - cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2; /* Make sure to sync all pending active queries at the end of * command buffer. 
@@ -3168,6 +3545,7 @@ VkResult radv_EndCommandBuffer( si_cp_dma_wait_for_idle(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs)) return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); @@ -3273,7 +3651,7 @@ void radv_CmdSetViewport( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount; + ASSERTED const uint32_t total_count = firstViewport + viewportCount; assert(firstViewport < MAX_VIEWPORTS); assert(total_count >= 1 && total_count <= MAX_VIEWPORTS); @@ -3297,7 +3675,7 @@ void radv_CmdSetScissor( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount; + ASSERTED const uint32_t total_count = firstScissor + scissorCount; assert(firstScissor < MAX_SCISSORS); assert(total_count >= 1 && total_count <= MAX_SCISSORS); @@ -3459,7 +3837,7 @@ void radv_CmdSetDiscardRectangleEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; + ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES); assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES); @@ -3523,6 +3901,15 @@ void radv_CmdExecuteCommands( if (secondary->sample_positions_needed) primary->sample_positions_needed = true; + if (!secondary->state.framebuffer && + (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) { + /* Emit the framebuffer state from primary if secondary + * has been recorded without a framebuffer, otherwise + * fast color/depth clears can't work. + */ + radv_emit_framebuffer_state(primary); + } + primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs); @@ -3666,19 +4053,6 @@ void radv_TrimCommandPool( } } -static uint32_t -radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer) -{ - struct radv_cmd_state *state = &cmd_buffer->state; - uint32_t subpass_id = state->subpass - state->pass->subpasses; - - /* The id of this subpass shouldn't exceed the number of subpasses in - * this render pass minus 1. 
- */ - assert(subpass_id < state->pass->subpass_count); - return subpass_id; -} - static void radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, uint32_t subpass_id) @@ -3686,7 +4060,7 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, struct radv_cmd_state *state = &cmd_buffer->state; struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); @@ -3699,7 +4073,8 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, continue; radv_handle_subpass_image_transition(cmd_buffer, - subpass->attachments[i]); + subpass->attachments[i], + true); } radv_cmd_buffer_clear_subpass(cmd_buffer); @@ -3725,8 +4100,8 @@ radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) continue; VkImageLayout layout = state->pass->attachments[a].final_layout; - radv_handle_subpass_image_transition(cmd_buffer, - (struct radv_subpass_attachment){a, layout}); + struct radv_subpass_attachment att = { a, layout }; + radv_handle_subpass_image_transition(cmd_buffer, att, false); } } @@ -3748,6 +4123,10 @@ void radv_CmdBeginRenderPass( if (result != VK_SUCCESS) return; + result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin); + if (result != VK_SUCCESS) + return; + radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } @@ -3793,7 +4172,7 @@ static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned in radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX]; if (loc->sgpr_idx != -1) { uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -3941,7 +4320,7 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, } if (info->indexed) { - int index_size = state->index_type ? 4 : 2; + int index_size = radv_get_vgt_index_size(state->index_type); uint64_t index_va; index_va = state->index_va; @@ -4020,8 +4399,11 @@ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & used_states) return true; + uint32_t primitive_reset_index = + radv_get_primitive_reset_index(cmd_buffer); + if (info->indexed && state->pipeline->graphics.prim_restart_enable && - (state->index_type ? 
0xffffffffu : 0xffffu) != state->last_primitive_reset_index) + primitive_reset_index != state->last_primitive_reset_index) return true; return false; @@ -4083,7 +4465,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; - MAYBE_UNUSED unsigned cdw_max = + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); @@ -4338,7 +4720,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25); + ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25); if (info->indirect) { uint64_t va = radv_buffer_get_va(info->indirect->bo); @@ -4590,11 +4972,13 @@ void radv_CmdEndRenderPass( radv_cmd_buffer_end_subpass(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); cmd_buffer->state.pass = NULL; cmd_buffer->state.subpass = NULL; cmd_buffer->state.attachments = NULL; cmd_buffer->state.framebuffer = NULL; + cmd_buffer->state.subpass_sample_locs = NULL; } void radv_CmdEndRenderPass2KHR( @@ -4650,7 +5034,8 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe VkImageLayout dst_layout, unsigned src_queue_mask, unsigned dst_queue_mask, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (!radv_image_has_htile(image)) return; @@ -4678,7 +5063,8 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range); + radv_decompress_depth_image_inplace(cmd_buffer, image, + &local_range, sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4686,20 +5072,23 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe } static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, + uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image) + struct radv_image *image, + const VkImageSubresourceRange *range) { struct radv_cmd_state *state = &cmd_buffer->state; static const uint32_t fmask_clear_values[4] = { @@ -4714,20 +5103,50 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const 
VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value); + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value); + + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) { + /* When DCC is enabled with mipmaps, some levels might not + * support fast clears and we have to initialize them as "fully + * expanded". + */ + /* Compute the size of all fast clearable DCC levels. */ + for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[i]; + unsigned dcc_fast_clear_size = + surf_level->dcc_slice_fast_clear_size * image->info.array_size; + + if (!dcc_fast_clear_size) + break; + + size = surf_level->dcc_offset + dcc_fast_clear_size; + } + + /* Initialize the mipmap levels without DCC. */ + if (size != image->planes[0].surface.dcc_size) { + state->flush_bits |= + radv_fill_buffer(cmd_buffer, image->bo, + image->offset + image->dcc_offset + size, + image->planes[0].surface.dcc_size - size, + 0xffffffff); + } + } state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -4741,7 +5160,8 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, VkImageLayout src_layout, VkImageLayout dst_layout, unsigned src_queue_mask, - unsigned dst_queue_mask) + unsigned dst_queue_mask, + const VkImageSubresourceRange *range) { if (radv_image_has_cmask(image)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ @@ -4751,14 +5171,14 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, value = 0xccccccccu; } - radv_initialise_cmask(cmd_buffer, image, value); + radv_initialise_cmask(cmd_buffer, image, range, value); } if (radv_image_has_fmask(image)) { - radv_initialize_fmask(cmd_buffer, image); + radv_initialize_fmask(cmd_buffer, image, range); } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. 
*/ bool need_decompress_pass = false; @@ -4768,15 +5188,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, need_decompress_pass = true; } - radv_initialize_dcc(cmd_buffer, image, value); + radv_initialize_dcc(cmd_buffer, image, range, value); - radv_update_fce_metadata(cmd_buffer, image, + radv_update_fce_metadata(cmd_buffer, image, range, need_decompress_pass); } - if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) { + if (radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t color_values[2] = {}; - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, range, + color_values); } } @@ -4794,13 +5216,14 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, src_layout, dst_layout, - src_queue_mask, dst_queue_mask); + src_queue_mask, dst_queue_mask, + range); return; } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { - radv_initialize_dcc(cmd_buffer, image, 0xffffffffu); + radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); @@ -4840,7 +5263,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, VkImageLayout dst_layout, uint32_t src_family, uint32_t dst_family, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (image->exclusive && src_family != dst_family) { /* This is an acquire or a release operation and there will be @@ -4850,6 +5274,10 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, assert(src_family == cmd_buffer->queue_family_index || dst_family == cmd_buffer->queue_family_index); + if (src_family == VK_QUEUE_FAMILY_EXTERNAL || + src_family == VK_QUEUE_FAMILY_FOREIGN_EXT) + return; + if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) return; @@ -4873,7 +5301,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, radv_handle_depth_image_transition(cmd_buffer, image, src_layout, dst_layout, src_queue_mask, dst_queue_mask, - range); + range, sample_locs); } else { radv_handle_color_image_transition(cmd_buffer, image, src_layout, dst_layout, @@ -4909,7 +5337,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff); assert(cmd_buffer->cs->cdw <= cdw_max); @@ -4955,12 +5383,29 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); + + const struct VkSampleLocationsInfoEXT *sample_locs_info = + vk_find_struct_const(pImageMemoryBarriers[i].pNext, + SAMPLE_LOCATIONS_INFO_EXT); + struct radv_sample_locations_state sample_locations = {}; + + if (sample_locs_info) { + assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT); + sample_locations.per_pixel = 
sample_locs_info->sampleLocationsPerPixel; + sample_locations.grid_size = sample_locs_info->sampleLocationGridSize; + sample_locations.count = sample_locs_info->sampleLocationsCount; + typed_memcpy(&sample_locations.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, pImageMemoryBarriers[i].newLayout, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, - &pImageMemoryBarriers[i].subresourceRange); + &pImageMemoryBarriers[i].subresourceRange, + sample_locs_info ? &sample_locations : NULL); } /* Make sure CP DMA is idle because the driver might have performed a @@ -5011,7 +5456,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); /* Flags that only require a top-of-pipe event. */ VkPipelineStageFlags top_of_pipe_flags = @@ -5056,6 +5501,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->device->physical_device->rad_info.chip_class, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, va, value, cmd_buffer->gfx9_eop_bug_va); } @@ -5298,14 +5744,14 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) radeon_emit(cs, 4); /* poll interval */ } -void radv_CmdBeginTransformFeedbackEXT( - VkCommandBuffer commandBuffer, - uint32_t firstCounterBuffer, - uint32_t counterBufferCount, - const VkBuffer* pCounterBuffers, - const VkDeviceSize* pCounterBufferOffsets) +static void +radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) + { - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; @@ -5363,7 +5809,7 @@ void radv_CmdBeginTransformFeedbackEXT( radv_set_streamout_enable(cmd_buffer, true); } -void radv_CmdEndTransformFeedbackEXT( +void radv_CmdBeginTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, @@ -5371,6 +5817,19 @@ void radv_CmdEndTransformFeedbackEXT( const VkDeviceSize* pCounterBufferOffsets) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + radv_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); +} + +static void +radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; uint32_t i; @@ -5416,6 +5875,20 @@ void radv_CmdEndTransformFeedbackEXT( radv_set_streamout_enable(cmd_buffer, false); } +void radv_CmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, 
commandBuffer); + + radv_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); +} + void radv_CmdDrawIndirectByteCountEXT( VkCommandBuffer commandBuffer, uint32_t instanceCount, @@ -5437,3 +5910,39 @@ void radv_CmdDrawIndirectByteCountEXT( radv_draw(cmd_buffer, &info); } + +/* VK_AMD_buffer_marker */ +void radv_CmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset; + + si_emit_cache_flush(cmd_buffer); + + if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) { + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, marker); + radeon_emit(cs, 0); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + } else { + si_cs_emit_write_event_eop(cs, + cmd_buffer->device->physical_device->rad_info.chip_class, + radv_cmd_buffer_uses_mec(cmd_buffer), + V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DST_SEL_MEM, + EOP_DATA_SEL_VALUE_32BIT, + va, marker, + cmd_buffer->gfx9_eop_bug_va); + } +}