X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=a833fc4221b8afcb404e326885f8bd5aae8118ed;hb=abc226cf41574454c79477c217e60e8ff1fddfad;hp=3361df1f345aae994410d6670a0d262fca586799;hpb=6b976024a87c4f4138435d4e416fdcdfbed39724;p=mesa.git diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index 3361df1f345..a833fc4221b 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -30,8 +30,8 @@ #include "radv_shader.h" #include "radv_cs.h" #include "sid.h" -#include "gfx9d.h" #include "vk_format.h" +#include "vk_util.h" #include "radv_debug.h" #include "radv_meta.h" @@ -57,7 +57,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, VkImageLayout dst_layout, uint32_t src_family, uint32_t dst_family, - const VkImageSubresourceRange *range); + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs); const struct radv_dynamic_state default_dynamic_state = { .viewport = { @@ -105,6 +106,7 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, dest->viewport.count = src->viewport.count; dest->scissor.count = src->scissor.count; dest->discard_rectangle.count = src->discard_rectangle.count; + dest->sample_location.count = src->sample_location.count; if (copy_mask & RADV_DYNAMIC_VIEWPORT) { if (memcmp(&dest->viewport.viewports, &src->viewport.viewports, @@ -192,6 +194,22 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, } } + if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) { + if (dest->sample_location.per_pixel != src->sample_location.per_pixel || + dest->sample_location.grid_size.width != src->sample_location.grid_size.width || + dest->sample_location.grid_size.height != src->sample_location.grid_size.height || + memcmp(&dest->sample_location.locations, + &src->sample_location.locations, + src->sample_location.count * sizeof(VkSampleLocationEXT))) { + dest->sample_location.per_pixel = src->sample_location.per_pixel; + dest->sample_location.grid_size = src->sample_location.grid_size; + typed_memcpy(dest->sample_location.locations, + src->sample_location.locations, + src->sample_location.count); + dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS; + } + } + cmd_buffer->state.dirty |= dest_mask; } @@ -215,7 +233,7 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer) { return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE && - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; } enum ring_type radv_queue_family_to_ring(int f) { @@ -301,7 +319,6 @@ radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) static VkResult radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) { - cmd_buffer->device->ws->cs_reset(cmd_buffer->cs); list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, @@ -326,28 +343,35 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->record_result = VK_SUCCESS; + memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings)); + for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) { cmd_buffer->descriptors[i].dirty = 0; cmd_buffer->descriptors[i].valid = 0; cmd_buffer->descriptors[i].push_dirty = false; } - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 && + cmd_buffer->queue_family_index == 
RADV_QUEUE_GENERAL) { unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends; - unsigned eop_bug_offset; + unsigned fence_offset, eop_bug_offset; void *fence_ptr; - radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, - &cmd_buffer->gfx9_fence_offset, + radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset, &fence_ptr); - cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo; - /* Allocate a buffer for the EOP bug on GFX9. */ - radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0, - &eop_bug_offset, &fence_ptr); - cmd_buffer->gfx9_eop_bug_va = + cmd_buffer->gfx9_fence_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); - cmd_buffer->gfx9_eop_bug_va += eop_bug_offset; + cmd_buffer->gfx9_fence_va += fence_offset; + + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { + /* Allocate a buffer for the EOP bug on GFX9. */ + radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, + &eop_bug_offset, &fence_ptr); + cmd_buffer->gfx9_eop_bug_va = + radv_buffer_get_va(cmd_buffer->upload.upload_bo); + cmd_buffer->gfx9_eop_bug_va += eop_bug_offset; + } } cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL; @@ -372,7 +396,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS| RADEON_FLAG_NO_INTERPROCESS_SHARING | - RADEON_FLAG_32BIT); + RADEON_FLAG_32BIT, + RADV_BO_PRIORITY_UPLOAD_BUFFER); if (!bo) { cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY; @@ -413,6 +438,8 @@ radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer, unsigned *out_offset, void **ptr) { + assert(util_is_power_of_two_nonzero(alignment)); + uint64_t offset = align(cmd_buffer->upload.offset, alignment); if (offset + size > cmd_buffer->upload.size) { if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size)) @@ -453,7 +480,7 @@ radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va, radeon_check_space(cmd_buffer->device->ws, cs, 4 + count); radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME)); radeon_emit(cs, va); @@ -486,24 +513,16 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer, enum radv_cmd_flush_bits flags) { if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) { - uint32_t *ptr = NULL; - uint64_t va = 0; - assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_CS_PARTIAL_FLUSH)); - if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { - va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + - cmd_buffer->gfx9_fence_offset; - ptr = &cmd_buffer->gfx9_fence_idx; - } - radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4); /* Force wait for graphics or compute engines to be idle. 
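 		 * With RADV_DEBUG_SYNC_SHADERS set, every draw/dispatch is
 		 * followed by a PS/CS partial flush, which serializes all
 		 * shader work on the queue (debug aid, very slow).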
*/ si_cs_emit_cache_flush(cmd_buffer->cs, cmd_buffer->device->physical_device->rad_info.chip_class, - ptr, va, + &cmd_buffer->gfx9_fence_idx, + cmd_buffer->gfx9_fence_va, radv_cmd_buffer_uses_mec(cmd_buffer), flags, cmd_buffer->gfx9_eop_bug_va); } @@ -567,8 +586,8 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer, for_each_bit(i, descriptors_state->valid) { struct radv_descriptor_set *set = descriptors_state->sets[i]; - data[i * 2] = (uintptr_t)set; - data[i * 2 + 1] = (uintptr_t)set >> 32; + data[i * 2] = (uint64_t)(uintptr_t)set; + data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32; } radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data); @@ -595,7 +614,6 @@ radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer, return; assert(loc->num_sgprs == 1); - assert(!loc->indirect); radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, va, false); @@ -634,6 +652,207 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer, } } +/** + * Convert the user sample locations to hardware sample locations (the values + * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*). + */ +static void +radv_convert_user_sample_locs(struct radv_sample_locations_state *state, + uint32_t x, uint32_t y, VkOffset2D *sample_locs) +{ + uint32_t x_offset = x % state->grid_size.width; + uint32_t y_offset = y % state->grid_size.height; + uint32_t num_samples = (uint32_t)state->per_pixel; + VkSampleLocationEXT *user_locs; + uint32_t pixel_offset; + + pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples; + + assert(pixel_offset <= MAX_SAMPLE_LOCATIONS); + user_locs = &state->locations[pixel_offset]; + + for (uint32_t i = 0; i < num_samples; i++) { + float shifted_pos_x = user_locs[i].x - 0.5; + float shifted_pos_y = user_locs[i].y - 0.5; + + int32_t scaled_pos_x = floor(shifted_pos_x * 16); + int32_t scaled_pos_y = floor(shifted_pos_y * 16); + + sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7); + sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7); + } +} + +/** + * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample + * locations. + */ +static void +radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs, + uint32_t *sample_locs_pixel) +{ + for (uint32_t i = 0; i < num_samples; i++) { + uint32_t sample_reg_idx = i / 4; + uint32_t sample_loc_idx = i % 4; + int32_t pos_x = sample_locs[i].x; + int32_t pos_y = sample_locs[i].y; + + uint32_t shift_x = 8 * sample_loc_idx; + uint32_t shift_y = shift_x + 4; + + sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x; + sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y; + } +} + +/** + * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware + * sample locations. + */ +static uint64_t +radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer, + VkOffset2D *sample_locs, + uint32_t num_samples) +{ + uint32_t centroid_priorities[num_samples]; + uint32_t sample_mask = num_samples - 1; + uint32_t distances[num_samples]; + uint64_t centroid_priority = 0; + + /* Compute the distances from center for each sample. */ + for (int i = 0; i < num_samples; i++) { + distances[i] = (sample_locs[i].x * sample_locs[i].x) + + (sample_locs[i].y * sample_locs[i].y); + } + + /* Compute the centroid priorities by looking at the distances array. 
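+	 * This is a selection sort by distance: pass i picks the closest
+	 * remaining sample, so e.g. distances {5, 2, 2, 8} give the priority
+	 * order {1, 2, 0, 3}, sample 1 being closest to the pixel center.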
*/ + for (int i = 0; i < num_samples; i++) { + uint32_t min_idx = 0; + + for (int j = 1; j < num_samples; j++) { + if (distances[j] < distances[min_idx]) + min_idx = j; + } + + centroid_priorities[i] = min_idx; + distances[min_idx] = 0xffffffff; + } + + /* Compute the final centroid priority. */ + for (int i = 0; i < 8; i++) { + centroid_priority |= + centroid_priorities[i & sample_mask] << (i * 4); + } + + return centroid_priority << 32 | centroid_priority; +} + +/** + * Emit the sample locations that are specified with VK_EXT_sample_locations. + */ +static void +radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; + struct radv_multisample_state *ms = &pipeline->graphics.ms; + struct radv_sample_locations_state *sample_location = + &cmd_buffer->state.dynamic.sample_location; + uint32_t num_samples = (uint32_t)sample_location->per_pixel; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t sample_locs_pixel[4][2] = {}; + VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */ + uint32_t max_sample_dist = 0; + uint64_t centroid_priority; + + if (!cmd_buffer->state.dynamic.sample_location.count) + return; + + /* Convert the user sample locations to hardware sample locations. */ + radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]); + radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]); + radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]); + radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]); + + /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */ + for (uint32_t i = 0; i < 4; i++) { + radv_compute_sample_locs_pixel(num_samples, sample_locs[i], + sample_locs_pixel[i]); + } + + /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */ + centroid_priority = + radv_compute_centroid_priority(cmd_buffer, sample_locs[0], + num_samples); + + /* Compute the maximum sample distance from the specified locations. */ + for (uint32_t i = 0; i < num_samples; i++) { + VkOffset2D offset = sample_locs[0][i]; + max_sample_dist = MAX2(max_sample_dist, + MAX2(abs(offset.x), abs(offset.y))); + } + + /* Emit the specified user sample locations. 
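+	 * Each PA_SC_AA_SAMPLE_LOCS_PIXEL_* register packs four samples as
+	 * signed 4-bit X/Y pairs (X in bits [3:0] and Y in bits [7:4] for
+	 * sample 0, shifted left by 8 bits per sample), so 2x/4x need one
+	 * register per pixel of the 2x2 grid and 8x needs two.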
*/ + switch (num_samples) { + case 2: + case 4: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + break; + case 8: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]); + radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]); + radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]); + radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]); + break; + default: + unreachable("invalid number of samples"); + } + + /* Emit the maximum sample distance and the centroid priority. */ + uint32_t pa_sc_aa_config = ms->pa_sc_aa_config; + + pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST; + pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist); + + radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1); + radeon_emit(cs, pa_sc_aa_config); + + radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2); + radeon_emit(cs, centroid_priority); + radeon_emit(cs, centroid_priority >> 32); + + /* GFX9: Flush DFSM when the AA mode changes. */ + if (cmd_buffer->device->dfsm_allowed) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); + } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + +static void +radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline, + gl_shader_stage stage, + int idx, int count, uint32_t *values) +{ + struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx); + uint32_t base_reg = pipeline->user_data_0[stage]; + if (loc->sgpr_idx == -1) + return; + + assert(loc->num_sgprs == count); + + radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count); + radeon_emit_array(cmd_buffer->cs, values, count); +} + static void radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pipeline) @@ -654,15 +873,58 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); - radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples); + radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); /* GFX9: Flush DFSM when the AA mode changes. 
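 	 * (the same flush is emitted by radv_emit_sample_locations() when
 	 * user sample locations change)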
*/ if (cmd_buffer->device->dfsm_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + +static void +radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline) +{ + const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline; + + + if (pipeline->device->physical_device->rad_info.chip_class < GFX9) + return; + + if (old_pipeline && + old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 && + old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control) + return; + + bool binning_flush = false; + if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 || + cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 || + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + binning_flush = !old_pipeline || + G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) != + G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0); + } + + radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0, + pipeline->graphics.binning.pa_sc_binner_cntl_0 | + S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush)); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); + } else { + radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL, + pipeline->graphics.binning.db_dfsm_control); + } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } + static void radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer, struct radv_shader_variant *shader) @@ -710,7 +972,8 @@ radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer, if (mask & RADV_PREFETCH_GS) { radv_emit_shader_prefetch(cmd_buffer, pipeline->shaders[MESA_SHADER_GEOMETRY]); - radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); + if (radv_pipeline_has_gs_copy_shader(pipeline)) + radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader); } if (mask & RADV_PREFETCH_PS) @@ -857,10 +1120,13 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4); sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4); } + /* TODO: avoid redundantly setting context registers */ radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3); radeon_emit(cmd_buffer->cs, sx_ps_downconvert); radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon); radeon_emit(cmd_buffer->cs, sx_blend_opt_control); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -872,6 +1138,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) return; radv_update_multisample_state(cmd_buffer, pipeline); + radv_update_binning_state(cmd_buffer, pipeline); cmd_buffer->scratch_size_needed = MAX2(cmd_buffer->scratch_size_needed, @@ -884,6 +1151,15 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw); + if (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw || + 
cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash || + memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf, + pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) { + radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw); + cmd_buffer->state.context_roll_without_scissor_emitted = true; + } + for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) { if (!pipeline->shaders[i]) continue; @@ -892,7 +1168,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) pipeline->shaders[i]->bo); } - if (radv_pipeline_has_gs(pipeline)) + if (radv_pipeline_has_gs_copy_shader(pipeline)) radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->gs_copy_shader->bo); @@ -920,6 +1196,8 @@ radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.dynamic.scissor.scissors, cmd_buffer->state.dynamic.viewport.viewports, cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband); + + cmd_buffer->state.context_roll_without_scissor_emitted = false; } static void @@ -1007,12 +1285,13 @@ static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, struct radv_attachment_info *att, - struct radv_image *image, + struct radv_image_view *iview, VkImageLayout layout) { - bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI; + bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; + struct radv_image *image = iview->image; if (!radv_layout_dcc_compressed(image, layout, radv_image_queue_family_mask(image, @@ -1021,7 +1300,45 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_DCC_ENABLE; } - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (radv_image_is_tc_compat_cmask(image) && + (radv_is_fmask_decompress_pipeline(cmd_buffer) || + radv_is_dcc_decompress_pipeline(cmd_buffer))) { + /* If this bit is set, the FMASK decompression operation + * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS). 
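+	 * Clear it while one of the decompression meta pipelines is bound,
+	 * so that the FMASK data of TC-compatible CMASK surfaces really
+	 * gets expanded.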
+ */ + cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; + } + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); + radeon_emit(cmd_buffer->cs, cb->cb_color_base); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_view); + radeon_emit(cmd_buffer->cs, cb_color_info); + radeon_emit(cmd_buffer->cs, cb->cb_color_attrib); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_control); + radeon_emit(cmd_buffer->cs, cb->cb_color_cmask); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, cb->cb_color_fmask); + radeon_emit(cmd_buffer->cs, 0); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1); + radeon_emit(cmd_buffer->cs, cb->cb_dcc_base); + + radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4, + cb->cb_color_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4, + cb->cb_color_cmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4, + cb->cb_color_fmask >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4, + cb->cb_dcc_base >> 32); + radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4, + cb->cb_color_attrib2); + radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4, + cb->cb_color_attrib3); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32)); @@ -1040,7 +1357,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32)); radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4, - S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch)); + cb->cb_mrt_epitch); } else { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1060,9 +1377,17 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, } } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, iview->base_mip)) { /* Drawing with DCC enabled also compresses colorbuffers. 
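 		 * The predicate below covers the view's whole subresource
 		 * range; radv_update_dcc_metadata() writes one 64-bit value
 		 * per mip level.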
*/ - radv_update_dcc_metadata(cmd_buffer, image, true); + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + radv_update_dcc_metadata(cmd_buffer, image, &range, true); } } @@ -1075,7 +1400,8 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, uint32_t db_z_info = ds->db_z_info; uint32_t db_z_info_reg; - if (!radv_image_is_tc_compat_htile(image)) + if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug || + !radv_image_is_tc_compat_htile(image)) return; if (!radv_layout_has_htile(image, layout, @@ -1087,7 +1413,7 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, db_z_info &= C_028040_ZRANGE_PRECISION; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { db_z_info_reg = R_028038_DB_Z_INFO; } else { db_z_info_reg = R_028040_DB_Z_INFO; @@ -1131,8 +1457,26 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view); radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface); - - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base); + radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7); + radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1)); + radeon_emit(cmd_buffer->cs, db_z_info); + radeon_emit(cmd_buffer->cs, db_stencil_info); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); + + radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); + radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32); + } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3); radeon_emit(cmd_buffer->cs, ds->db_htile_data_base); radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32)); @@ -1195,10 +1539,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, if (!framebuffer || !subpass) return; - att_idx = subpass->depth_stencil_attachment.attachment; - if (att_idx == VK_ATTACHMENT_UNUSED) + if (!subpass->depth_stencil_attachment) return; + att_idx = subpass->depth_stencil_attachment->attachment; att = &framebuffer->attachments[att_idx]; if (att->attachment->image != image) return; @@ -1212,11 +1556,13 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, */ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && ds_clear_value.depth == 0.0) { - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + VkImageLayout layout = subpass->depth_stencil_attachment->layout; radv_update_zrange_precision(cmd_buffer, &att->ds, image, layout, false); 
} + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } /** @@ -1243,8 +1589,8 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); @@ -1265,10 +1611,14 @@ radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, { struct radeon_cmdbuf *cs = cmd_buffer->cs; uint64_t va = radv_buffer_get_va(image->bo); + + if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug) + return; + va += image->offset + image->tc_compat_zrange_offset; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); @@ -1281,9 +1631,6 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkClearDepthStencilValue ds_clear_value) { - struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; uint32_t cond_val; /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last @@ -1345,7 +1692,7 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); @@ -1373,22 +1720,27 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->fce_pred_offset; + uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); - radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); + radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1396,22 +1748,27 @@ radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va 
+= image->offset + image->dcc_pred_offset; + uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); - radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); + radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1443,6 +1800,8 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2); radeon_emit(cs, color_values[0]); radeon_emit(cs, color_values[1]); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } /** @@ -1451,23 +1810,28 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, static void radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t color_values[2]) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); + uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - va += image->offset + image->clear_value_offset; - - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, color_values[0]); - radeon_emit(cs, color_values[1]); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cs, color_values[0]); + radeon_emit(cs, color_values[1]); + } } /** @@ -1475,13 +1839,23 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, int cb_idx, uint32_t color_values[2]) { - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + struct radv_image *image = iview->image; + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, iview->base_mip)); - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values); radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx, color_values); @@ -1492,27 +1866,26 @@ radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_color_clear_metadata(struct 
radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + struct radv_image_view *iview, int cb_idx) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + struct radv_image *image = iview->image; + uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip); - if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image)) + if (!radv_image_has_cmask(image) && + !radv_dcc_enabled(image, iview->base_mip)) return; uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); radeon_emit(cs, 2); } else { - /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating)); radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | COPY_DATA_DST_SEL(COPY_DATA_REG) | @@ -1533,7 +1906,6 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int i; struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; - unsigned num_bpp64_colorbufs = 0; /* this may happen for inherited secondary recording */ if (!framebuffer) @@ -1548,27 +1920,25 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int idx = subpass->color_attachments[i].attachment; struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; + struct radv_image_view *iview = att->attachment; VkImageLayout layout = subpass->color_attachments[i].layout; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT); - radv_emit_fb_color_state(cmd_buffer, i, att, image, layout); + assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); + radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); - radv_load_color_clear_metadata(cmd_buffer, image, i); - - if (image->surface.bpe >= 8) - num_bpp64_colorbufs++; + radv_load_color_clear_metadata(cmd_buffer, iview, i); } - if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - int idx = subpass->depth_stencil_attachment.attachment; - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + if (subpass->depth_stencil_attachment) { + int idx = subpass->depth_stencil_attachment->attachment; + VkImageLayout layout = subpass->depth_stencil_attachment->layout; struct radv_attachment_info *att = &framebuffer->attachments[idx]; struct radv_image *image = att->attachment->image; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image, + ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index); /* We currently don't support writing decompressed HTILE */ @@ -1583,7 +1953,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) } radv_load_ds_clear_metadata(cmd_buffer, image); } else { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) + if 
(cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2); else radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2); @@ -1595,24 +1965,20 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_X(framebuffer->width) | S_028208_BR_Y(framebuffer->height)); - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { - uint8_t watermark = 4; /* Default value for VI. */ - - /* For optimal DCC performance. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - if (num_bpp64_colorbufs >= 5) { - watermark = 8; - } else { - watermark = 6; - } - } + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { + bool disable_constant_encode = + cmd_buffer->device->physical_device->has_dcc_constant_encode; + enum chip_class chip_class = + cmd_buffer->device->physical_device->rad_info.chip_class; + uint8_t watermark = chip_class >= GFX10 ? 6 : 4; radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL, - S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) | - S_028424_OVERWRITE_COMBINER_WATERMARK(watermark)); + S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) | + S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) | + S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode)); } - if (cmd_buffer->device->dfsm_allowed) { + if (cmd_buffer->device->pbb_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); } @@ -1628,7 +1994,8 @@ radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer) if (state->index_type != state->last_index_type) { if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE, + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_03090C_VGT_INDEX_TYPE, 2, state->index_type); } else { radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); @@ -1657,7 +2024,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) uint32_t db_count_control; if(!cmd_buffer->state.active_occlusion_queries) { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) && pipeline->graphics.disable_out_of_order_rast_for_occlusion && has_perfect_queries) { @@ -1675,10 +2042,12 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) } else { const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? 
util_logbase2(subpass->max_sample_count) : 0; + bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | + S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) | S_028004_SAMPLE_RATE(sample_rate) | S_028004_ZPASS_ENABLE(1) | S_028004_SLICE_EVEN_ENABLE(1) | @@ -1705,6 +2074,8 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) } radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -1739,6 +2110,9 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE) radv_emit_discard_rectangle(cmd_buffer); + if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) + radv_emit_sample_locations(cmd_buffer); + cmd_buffer->state.dirty &= ~states; } @@ -1840,7 +2214,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, if (flush_indirect_descriptors) radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, MAX_SETS * MESA_SHADER_STAGES * 4); @@ -1886,6 +2260,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, radv_get_descriptors_state(cmd_buffer, bind_point); struct radv_pipeline_layout *layout = pipeline->layout; struct radv_shader_variant *shader, *prev_shader; + bool need_push_constants = false; unsigned offset; void *ptr; uint64_t va; @@ -1895,37 +2270,56 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, (!layout->push_constant_size && !layout->dynamic_offset_count)) return; - if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + - 16 * layout->dynamic_offset_count, - 256, &offset, &ptr)) - return; + radv_foreach_stage(stage, stages) { + if (!pipeline->shaders[stage]) + continue; - memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); - memcpy((char*)ptr + layout->push_constant_size, - descriptors_state->dynamic_buffers, - 16 * layout->dynamic_offset_count); + need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants; + need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets; - va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); - va += offset; + uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts; + uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts; - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, MESA_SHADER_STAGES * 4); + radv_emit_inline_push_consts(cmd_buffer, pipeline, stage, + AC_UD_INLINE_PUSH_CONSTANTS, + count, + (uint32_t *)&cmd_buffer->push_constants[base * 4]); + } - prev_shader = NULL; - radv_foreach_stage(stage, stages) { - shader = radv_get_shader(pipeline, stage); + if (need_push_constants) { + if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + + 16 * layout->dynamic_offset_count, + 256, &offset, &ptr)) + return; + + memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); + memcpy((char*)ptr + layout->push_constant_size, + descriptors_state->dynamic_buffers, + 16 * layout->dynamic_offset_count); + + va = 
radv_buffer_get_va(cmd_buffer->upload.upload_bo); + va += offset; - /* Avoid redundantly emitting the address for merged stages. */ - if (shader && shader != prev_shader) { - radv_emit_userdata_address(cmd_buffer, pipeline, stage, - AC_UD_PUSH_CONSTANTS, va); + ASSERTED unsigned cdw_max = + radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, MESA_SHADER_STAGES * 4); - prev_shader = shader; + prev_shader = NULL; + radv_foreach_stage(stage, stages) { + shader = radv_get_shader(pipeline, stage); + + /* Avoid redundantly emitting the address for merged stages. */ + if (shader && shader != prev_shader) { + radv_emit_userdata_address(cmd_buffer, pipeline, stage, + AC_UD_PUSH_CONSTANTS, va); + + prev_shader = shader; + } } + assert(cmd_buffer->cs->cdw <= cdw_max); } cmd_buffer->push_constant_stages &= ~stages; - assert(cmd_buffer->cs->cdw <= cdw_max); } static void @@ -1934,13 +2328,13 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, { if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && - cmd_buffer->state.pipeline->vertex_elements.count && + cmd_buffer->state.pipeline->num_vertex_bindings && radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; unsigned vb_offset; void *vb_ptr; uint32_t i = 0; - uint32_t count = velems->count; + uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings; uint64_t va; /* allocate some descriptor state for vertex buffers */ @@ -1951,21 +2345,35 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, for (i = 0; i < count; i++) { uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; uint32_t offset; - int vb = velems->binding[i]; - struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer; - uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; + struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; + uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + + if (!buffer) + continue; va = radv_buffer_get_va(buffer->bo); - offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i]; + offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride) + if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; else desc[2] = buffer->size - offset; - desc[3] = velems->rsrc_word3[i]; + desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) | + S_008F0C_OOB_SELECT(1) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2003,7 +2411,7 @@ radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va) base_reg + loc->sgpr_idx * 4, va, false); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { loc = 
&pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS]; if (loc->sgpr_idx != -1) { base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -2043,7 +2451,7 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) /* Set the descriptor. * - * On VI, the format must be non-INVALID, otherwise + * On GFX8, the format must be non-INVALID, otherwise * the buffer will be considered not bound and store * instructions will be no-ops. */ @@ -2053,8 +2461,15 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(3) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2075,28 +2490,96 @@ radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); } +struct radv_draw_info { + /** + * Number of vertices. + */ + uint32_t count; + + /** + * Index of the first vertex. + */ + int32_t vertex_offset; + + /** + * First instance id. + */ + uint32_t first_instance; + + /** + * Number of instances. + */ + uint32_t instance_count; + + /** + * First index (indexed draws only). + */ + uint32_t first_index; + + /** + * Whether it's an indexed draw. + */ + bool indexed; + + /** + * Indirect draw parameters resource. + */ + struct radv_buffer *indirect; + uint64_t indirect_offset; + uint32_t stride; + + /** + * Draw count parameters resource. + */ + struct radv_buffer *count_buffer; + uint64_t count_buffer_offset; + + /** + * Stream output parameters resource. + */ + struct radv_buffer *strmout_buffer; + uint64_t strmout_buffer_offset; +}; + +static uint32_t +radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer) +{ + switch (cmd_buffer->state.index_type) { + case V_028A7C_VGT_INDEX_8: + return 0xffu; + case V_028A7C_VGT_INDEX_16: + return 0xffffu; + case V_028A7C_VGT_INDEX_32: + return 0xffffffffu; + default: + unreachable("invalid index type"); + } +} + static void -radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, - bool instanced_draw, bool indirect_draw, - uint32_t draw_vertex_count) +si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, + bool instanced_draw, bool indirect_draw, + bool count_from_stream_output, + uint32_t draw_vertex_count) { struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; struct radv_cmd_state *state = &cmd_buffer->state; struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint32_t ia_multi_vgt_param; - int32_t primitive_reset_en; + unsigned ia_multi_vgt_param; - /* Draw state. 
*/ ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, - indirect_draw, draw_vertex_count); + indirect_draw, + count_from_stream_output, + draw_vertex_count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { - if (info->chip_class >= GFX9) { - radeon_set_uconfig_reg_idx(cs, + if (info->chip_class == GFX9) { + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); - } else if (info->chip_class >= CIK) { + } else if (info->chip_class >= GFX7) { radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param); @@ -2106,10 +2589,28 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, } state->last_ia_multi_vgt_param = ia_multi_vgt_param; } +} + +static void +radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, + const struct radv_draw_info *draw_info) +{ + struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; + struct radv_cmd_state *state = &cmd_buffer->state; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + int32_t primitive_reset_en; + + /* Draw state. */ + if (info->chip_class < GFX10) { + si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, + draw_info->indirect, + !!draw_info->strmout_buffer, + draw_info->indirect ? 0 : draw_info->count); + } /* Primitive restart. */ primitive_reset_en = - indexed_draw && state->pipeline->graphics.prim_restart_enable; + draw_info->indexed && state->pipeline->graphics.prim_restart_enable; if (primitive_reset_en != state->last_primitive_reset_en) { state->last_primitive_reset_en = primitive_reset_en; @@ -2126,7 +2627,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, if (primitive_reset_en) { uint32_t primitive_reset_index = - state->index_type ? 
0xffffffffu : 0xffffu; + radv_get_primitive_reset_index(cmd_buffer); if (primitive_reset_index != state->last_primitive_reset_index) { radeon_set_context_reg(cs, @@ -2135,11 +2636,32 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, state->last_primitive_reset_index = primitive_reset_index; } } -} -static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer, - VkPipelineStageFlags src_stage_mask) -{ + if (draw_info->strmout_buffer) { + uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo); + + va += draw_info->strmout_buffer->offset + + draw_info->strmout_buffer_offset; + + radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, + draw_info->stride); + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_REG) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); + radeon_emit(cs, 0); /* unused */ + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo); + } +} + +static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer, + VkPipelineStageFlags src_stage_mask) +{ if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | @@ -2188,7 +2710,7 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_SHADER_WRITE_BIT: case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT: case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: - flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_WB_L2; break; case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB; @@ -2203,7 +2725,7 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_TRANSFER_WRITE_BIT: flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB | - RADV_CMD_FLAG_INV_GLOBAL_L2; + RADV_CMD_FLAG_INV_L2; if (flush_CB_meta) flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -2239,7 +2761,9 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, if (!radv_image_has_htile(image)) flush_DB_meta = false; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + /* TODO: implement shader coherent for GFX10 */ + + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { if (image->info.samples == 1 && (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) && @@ -2260,19 +2784,19 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: break; case VK_ACCESS_UNIFORM_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE; break; case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: case VK_ACCESS_TRANSFER_READ_BIT: case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | - RADV_CMD_FLAG_INV_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE | + RADV_CMD_FLAG_INV_L2; break; case VK_ACCESS_SHADER_READ_BIT: - flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1; + flush_bits |= RADV_CMD_FLAG_INV_VCACHE; if (!image_is_coherent) - flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2; + flush_bits |= RADV_CMD_FLAG_INV_L2; break; case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: if (flush_CB) @@ -2303,11 +2827,67 @@ void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, 
NULL);
 }
 
+uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+	/* The id of this subpass shouldn't exceed the number of subpasses in
+	 * this render pass minus 1.
+	 */
+	assert(subpass_id < state->pass->subpass_count);
+	return subpass_id;
+}
+
+static struct radv_sample_locations_state *
+radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
+				     uint32_t att_idx,
+				     bool begin_subpass)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+	struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment;
+
+	if (view->image->info.samples == 1)
+		return NULL;
+
+	if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
+		/* Return the initial sample locations if this is the initial
+		 * layout transition of the given subpass attachment.
+		 */
+		if (state->attachments[att_idx].sample_location.count > 0)
+			return &state->attachments[att_idx].sample_location;
+	} else {
+		/* Otherwise return the subpass sample locations if defined. */
+		if (state->subpass_sample_locs) {
+			/* Because the driver sets the current subpass before
+			 * initial layout transitions, we should use the sample
+			 * locations from the previous subpass to avoid an
+			 * off-by-one problem. Otherwise, use the sample
+			 * locations for the current subpass for final layout
+			 * transitions.
+			 */
+			if (begin_subpass)
+				subpass_id--;
+
+			for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
+				if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
+					return &state->subpass_sample_locs[i].sample_location;
+			}
+		}
+	}
+
+	return NULL;
+}
+
 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
-					      struct radv_subpass_attachment att)
+					      struct radv_subpass_attachment att,
+					      bool begin_subpass)
 {
 	unsigned idx = att.attachment;
 	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
+	struct radv_sample_locations_state *sample_locs;
 	VkImageSubresourceRange range;
 	range.aspectMask = 0;
 	range.baseMipLevel = view->base_mip;
@@ -2315,7 +2895,7 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buf
 	range.baseArrayLayer = view->base_layer;
 	range.layerCount = cmd_buffer->state.framebuffer->layers;
 
-	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+	if (cmd_buffer->state.subpass->view_mask) {
 		/* If the current subpass uses multiview, the driver might have
 		 * performed a fast color/depth clear to the whole image
 		 * (including all layers). To make sure the driver will
@@ -2326,10 +2906,16 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buf
 		range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
 	}
 
+	/* Get the subpass sample locations for the given attachment; if NULL
+	 * is returned the driver will use the default HW locations. 
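+	 * The per-attachment/per-subpass locations are captured from
+	 * VkRenderPassSampleLocationsBeginInfoEXT by
+	 * radv_cmd_state_setup_sample_locations() at render pass begin.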
+ */ + sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, + begin_subpass); + radv_handle_image_transition(cmd_buffer, view->image, cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range); + att.layout, 0, 0, &range, sample_locs); cmd_buffer->state.attachments[idx].current_layout = att.layout; @@ -2338,31 +2924,94 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf void radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, - const struct radv_subpass *subpass, bool transitions) + const struct radv_subpass *subpass) { - if (transitions) { - radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + cmd_buffer->state.subpass = subpass; - for (unsigned i = 0; i < subpass->color_count; ++i) { - if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) - radv_handle_subpass_image_transition(cmd_buffer, - subpass->color_attachments[i]); - } + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; +} - for (unsigned i = 0; i < subpass->input_count; ++i) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->input_attachments[i]); - } +static VkResult +radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, + struct radv_render_pass *pass, + const VkRenderPassBeginInfo *info) +{ + const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs = + vk_find_struct_const(info->pNext, + RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_framebuffer *framebuffer = state->framebuffer; - if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->depth_stencil_attachment); - } + if (!sample_locs) { + state->subpass_sample_locs = NULL; + return VK_SUCCESS; } - cmd_buffer->state.subpass = subpass; + for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) { + const VkAttachmentSampleLocationsEXT *att_sample_locs = + &sample_locs->pAttachmentInitialSampleLocations[i]; + uint32_t att_idx = att_sample_locs->attachmentIndex; + struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; + struct radv_image *image = att->attachment->image; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; + assert(vk_format_is_depth_or_stencil(image->vk_format)); + + /* From the Vulkan spec 1.1.108: + * + * "If the image referenced by the framebuffer attachment at + * index attachmentIndex was not created with + * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT + * then the values specified in sampleLocationsInfo are + * ignored." 
+ */ + if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) + continue; + + const VkSampleLocationsInfoEXT *sample_locs_info = + &att_sample_locs->sampleLocationsInfo; + + state->attachments[att_idx].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->attachments[att_idx].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->attachments[att_idx].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->attachments[att_idx].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc, + sample_locs->postSubpassSampleLocationsCount * + sizeof(state->subpass_sample_locs[0]), + 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (state->subpass_sample_locs == NULL) { + cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY; + return cmd_buffer->record_result; + } + + state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount; + + for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) { + const VkSubpassSampleLocationsEXT *subpass_sample_locs_info = + &sample_locs->pPostSubpassSampleLocations[i]; + const VkSampleLocationsInfoEXT *sample_locs_info = + &subpass_sample_locs_info->sampleLocationsInfo; + + state->subpass_sample_locs[i].subpass_idx = + subpass_sample_locs_info->subpassIndex; + state->subpass_sample_locs[i].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->subpass_sample_locs[i].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->subpass_sample_locs[i].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + return VK_SUCCESS; } static VkResult @@ -2419,6 +3068,7 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].sample_location.count = 0; } return VK_SUCCESS; @@ -2542,7 +3192,7 @@ VkResult radv_BeginCommandBuffer( if (result != VK_SUCCESS) return result; - radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false); + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); } if (unlikely(cmd_buffer->device->trace_bo)) { @@ -2598,6 +3248,36 @@ void radv_CmdBindVertexBuffers( cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER; } +static uint32_t +vk_to_index_type(VkIndexType type) +{ + switch (type) { + case VK_INDEX_TYPE_UINT8_EXT: + return V_028A7C_VGT_INDEX_8; + case VK_INDEX_TYPE_UINT16: + return V_028A7C_VGT_INDEX_16; + case VK_INDEX_TYPE_UINT32: + return V_028A7C_VGT_INDEX_32; + default: + unreachable("invalid index type"); + } +} + +static uint32_t +radv_get_vgt_index_size(uint32_t type) +{ + switch (type) { + case V_028A7C_VGT_INDEX_8: + return 1; + case V_028A7C_VGT_INDEX_16: + return 2; + case V_028A7C_VGT_INDEX_32: + return 4; + default: + unreachable("invalid index type"); + } +} + void radv_CmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, @@ -2616,12 +3296,12 @@ void radv_CmdBindIndexBuffer( cmd_buffer->state.index_buffer = index_buffer; cmd_buffer->state.index_offset = offset; - cmd_buffer->state.index_type = indexType; /* vk matches hw */ + cmd_buffer->state.index_type = vk_to_index_type(indexType); cmd_buffer->state.index_va = 
radv_buffer_get_va(index_buffer->bo); cmd_buffer->state.index_va += index_buffer->offset + offset; - int index_size_shift = cmd_buffer->state.index_type ? 2 : 1; - cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift; + int index_size = radv_get_vgt_index_size(indexType); + cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo); } @@ -2685,9 +3365,17 @@ void radv_CmdBindDescriptorSets( dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | - S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(3) | + S_008F0C_RESOURCE_LEVEL(1); + } else { + dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } + cmd_buffer->push_constant_stages |= set->layout->dynamic_shader_stages; } @@ -2778,6 +3466,14 @@ void radv_CmdPushDescriptorSetKHR( pipelineBindPoint)) return; + /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR() + * because it is invalid, according to the Vulkan spec. + */ + for (int i = 0; i < descriptorWriteCount; i++) { + ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; + assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); + } + radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer, radv_descriptor_set_to_handle(push_set), descriptorWriteCount, pDescriptorWrites, 0, NULL); @@ -2788,7 +3484,7 @@ void radv_CmdPushDescriptorSetWithTemplateKHR( VkCommandBuffer commandBuffer, - VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout _layout, uint32_t set, const void* pData) @@ -2832,8 +3528,14 @@ VkResult radv_EndCommandBuffer( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { - if (cmd_buffer->device->physical_device->rad_info.chip_class == SI) - cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2; + + /* Make sure to sync all pending active queries at the end of the + * command buffer. 
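+ * Otherwise the GPU might still be writing query results while the + * command buffer is considered complete. 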
+ */ + cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; + si_emit_cache_flush(cmd_buffer); } @@ -2843,6 +3545,7 @@ VkResult radv_EndCommandBuffer( si_cp_dma_wait_for_idle(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs)) return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); @@ -2860,6 +3563,8 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer) if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline) return; + assert(!pipeline->ctx_cs.cdw); + cmd_buffer->state.emitted_compute_pipeline = pipeline; radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw); @@ -2946,11 +3651,16 @@ void radv_CmdSetViewport( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount; + ASSERTED const uint32_t total_count = firstViewport + viewportCount; assert(firstViewport < MAX_VIEWPORTS); assert(total_count >= 1 && total_count <= MAX_VIEWPORTS); + if (!memcmp(state->dynamic.viewport.viewports + firstViewport, + pViewports, viewportCount * sizeof(*pViewports))) { + return; + } + memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports, viewportCount * sizeof(*pViewports)); @@ -2965,11 +3675,16 @@ void radv_CmdSetScissor( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount; + ASSERTED const uint32_t total_count = firstScissor + scissorCount; assert(firstScissor < MAX_SCISSORS); assert(total_count >= 1 && total_count <= MAX_SCISSORS); + if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors, + scissorCount * sizeof(*pScissors))) { + return; + } + memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors, scissorCount * sizeof(*pScissors)); @@ -2981,6 +3696,10 @@ void radv_CmdSetLineWidth( float lineWidth) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + if (cmd_buffer->state.dynamic.line_width == lineWidth) + return; + cmd_buffer->state.dynamic.line_width = lineWidth; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH; } @@ -2992,12 +3711,19 @@ void radv_CmdSetDepthBias( float depthBiasSlopeFactor) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_bias.bias == depthBiasConstantFactor && + state->dynamic.depth_bias.clamp == depthBiasClamp && + state->dynamic.depth_bias.slope == depthBiasSlopeFactor) { + return; + } - cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor; - cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp; - cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor; + state->dynamic.depth_bias.bias = depthBiasConstantFactor; + state->dynamic.depth_bias.clamp = depthBiasClamp; + state->dynamic.depth_bias.slope = depthBiasSlopeFactor; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; } void radv_CmdSetBlendConstants( @@ -3005,11 +3731,14 @@ void radv_CmdSetBlendConstants( const float blendConstants[4]) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if 
(!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4)) + return; - memcpy(cmd_buffer->state.dynamic.blend_constants, - blendConstants, sizeof(float) * 4); + memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4); - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS; } void radv_CmdSetDepthBounds( @@ -3018,11 +3747,17 @@ void radv_CmdSetDepthBounds( float maxDepthBounds) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_bounds.min == minDepthBounds && + state->dynamic.depth_bounds.max == maxDepthBounds) { + return; + } - cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds; - cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds; + state->dynamic.depth_bounds.min = minDepthBounds; + state->dynamic.depth_bounds.max = maxDepthBounds; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS; } void radv_CmdSetStencilCompareMask( @@ -3031,13 +3766,21 @@ void radv_CmdSetStencilCompareMask( uint32_t compareMask) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_compare_mask.front == compareMask; + bool back_same = state->dynamic.stencil_compare_mask.back == compareMask; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) - cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask; + state->dynamic.stencil_compare_mask.front = compareMask; if (faceMask & VK_STENCIL_FACE_BACK_BIT) - cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask; + state->dynamic.stencil_compare_mask.back = compareMask; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK; } void radv_CmdSetStencilWriteMask( @@ -3046,13 +3789,21 @@ void radv_CmdSetStencilWriteMask( uint32_t writeMask) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_write_mask.front == writeMask; + bool back_same = state->dynamic.stencil_write_mask.back == writeMask; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) - cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask; + state->dynamic.stencil_write_mask.front = writeMask; if (faceMask & VK_STENCIL_FACE_BACK_BIT) - cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask; + state->dynamic.stencil_write_mask.back = writeMask; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK; } void radv_CmdSetStencilReference( @@ -3061,6 +3812,14 @@ void radv_CmdSetStencilReference( uint32_t reference) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_reference.front == reference; + bool back_same = state->dynamic.stencil_reference.back == reference; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || 
back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) cmd_buffer->state.dynamic.stencil_reference.front = reference; @@ -3078,17 +3837,41 @@ void radv_CmdSetDiscardRectangleEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_cmd_state *state = &cmd_buffer->state; - MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; + ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount; assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES); assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES); + if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle, + pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) { + return; + } + typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle], pDiscardRectangles, discardRectangleCount); state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE; } +void radv_CmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS); + + state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel; + state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize; + state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount; + typed_memcpy(&state->dynamic.sample_location.locations[0], + pSampleLocationsInfo->pSampleLocations, + pSampleLocationsInfo->sampleLocationsCount); + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS; +} + void radv_CmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, @@ -3118,6 +3901,15 @@ void radv_CmdExecuteCommands( if (secondary->sample_positions_needed) primary->sample_positions_needed = true; + if (!secondary->state.framebuffer && + (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) { + /* Emit the framebuffer state from primary if secondary + * has been recorded without a framebuffer, otherwise + * fast color/depth clears can't work. 
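+ * The framebuffer state also loads the fast clear metadata registers + * that those clears depend on. 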
+ */ + radv_emit_framebuffer_state(primary); + } + primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs); @@ -3248,7 +4040,7 @@ VkResult radv_ResetCommandPool( void radv_TrimCommandPool( VkDevice device, VkCommandPool commandPool, - VkCommandPoolTrimFlagsKHR flags) + VkCommandPoolTrimFlags flags) { RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool); @@ -3261,6 +4053,58 @@ void radv_TrimCommandPool( } } +static void +radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, + uint32_t subpass_id) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; + + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, 4096); + + radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + radv_handle_subpass_image_transition(cmd_buffer, + subpass->attachments[i], + true); + } + + radv_cmd_buffer_clear_subpass(cmd_buffer); + + assert(cmd_buffer->cs->cdw <= cdw_max); +} + +static void +radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + const struct radv_subpass *subpass = state->subpass; + uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); + + radv_cmd_buffer_resolve_subpass(cmd_buffer); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + if (state->pass->attachments[a].last_subpass_idx != subpass_id) + continue; + + VkImageLayout layout = state->pass->attachments[a].final_layout; + struct radv_subpass_attachment att = { a, layout }; + radv_handle_subpass_image_transition(cmd_buffer, att, false); + } +} + void radv_CmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, @@ -3269,10 +4113,7 @@ void radv_CmdBeginRenderPass( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass); RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); - - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, 2048); - MAYBE_UNUSED VkResult result; + VkResult result; cmd_buffer->state.framebuffer = framebuffer; cmd_buffer->state.pass = pass; @@ -3282,10 +4123,11 @@ void radv_CmdBeginRenderPass( if (result != VK_SUCCESS) return; - radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true); - assert(cmd_buffer->cs->cdw <= cdw_max); + result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin); + if (result != VK_SUCCESS) + return; - radv_cmd_buffer_clear_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } void radv_CmdBeginRenderPass2KHR( @@ -3303,13 +4145,9 @@ void radv_CmdNextSubpass( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, - 2048); - - radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true); - radv_cmd_buffer_clear_subpass(cmd_buffer); + uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer); + radv_cmd_buffer_end_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1); } void 
radv_CmdNextSubpass2KHR( @@ -3334,7 +4172,7 @@ static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned in radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index); } - if (pipeline->gs_copy_shader) { + if (radv_pipeline_has_gs_copy_shader(pipeline)) { struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX]; if (loc->sgpr_idx != -1) { uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; @@ -3412,57 +4250,6 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer, } } -struct radv_draw_info { - /** - * Number of vertices. - */ - uint32_t count; - - /** - * Index of the first vertex. - */ - int32_t vertex_offset; - - /** - * First instance id. - */ - uint32_t first_instance; - - /** - * Number of instances. - */ - uint32_t instance_count; - - /** - * First index (indexed draws only). - */ - uint32_t first_index; - - /** - * Whether it's an indexed draw. - */ - bool indexed; - - /** - * Indirect draw parameters resource. - */ - struct radv_buffer *indirect; - uint64_t indirect_offset; - uint32_t stride; - - /** - * Draw count parameters resource. - */ - struct radv_buffer *count_buffer; - uint64_t count_buffer_offset; - - /** - * Stream output parameters resource. - */ - struct radv_buffer *strmout_buffer; - uint64_t strmout_buffer_offset; -}; - static void radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, const struct radv_draw_info *info) @@ -3471,27 +4258,6 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys *ws = cmd_buffer->device->ws; struct radeon_cmdbuf *cs = cmd_buffer->cs; - if (info->strmout_buffer) { - uint64_t va = radv_buffer_get_va(info->strmout_buffer->bo); - - va += info->strmout_buffer->offset + - info->strmout_buffer_offset; - - radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, - info->stride); - - radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); - radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | - COPY_DATA_DST_SEL(COPY_DATA_REG) | - COPY_DATA_WR_CONFIRM); - radeon_emit(cs, va); - radeon_emit(cs, va >> 32); - radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); - radeon_emit(cs, 0); /* unused */ - - radv_cs_add_buffer(ws, cs, info->strmout_buffer->bo); - } - if (info->indirect) { uint64_t va = radv_buffer_get_va(info->indirect->bo); uint64_t count_va = 0; @@ -3554,7 +4320,7 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, } if (info->indexed) { - int index_size = state->index_type ? 4 : 2; + int index_size = radv_get_vgt_index_size(state->index_type); uint64_t index_va; index_va = state->index_va; @@ -3610,32 +4376,34 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, * any context registers. */ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, - bool indexed_draw) + const struct radv_draw_info *info) { struct radv_cmd_state *state = &cmd_buffer->state; if (!cmd_buffer->device->physical_device->has_scissor_bug) return false; + if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer) + return true; + uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL; /* Index, vertex and streamout buffers don't change context regs, and - * pipeline is handled later. + * pipeline is already handled. 
*/ used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_STREAMOUT_BUFFER | RADV_CMD_DIRTY_PIPELINE); - /* Assume all state changes except these two can imply context rolls. */ if (cmd_buffer->state.dirty & used_states) return true; - if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline) - return true; + uint32_t primitive_reset_index = + radv_get_primitive_reset_index(cmd_buffer); - if (indexed_draw && state->pipeline->graphics.prim_restart_enable && - (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index) + if (info->indexed && state->pipeline->graphics.prim_restart_enable && + primitive_reset_index != state->last_primitive_reset_index) return true; return false; @@ -3645,7 +4413,7 @@ static void radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, const struct radv_draw_info *info) { - bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed); + bool late_scissor_emission; if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) || cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline) @@ -3654,6 +4422,12 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) radv_emit_graphics_pipeline(cmd_buffer); + /* This should be before the cmd_buffer->state.dirty is cleared + * (excluding RADV_CMD_DIRTY_PIPELINE) and after + * cmd_buffer->state.context_roll_without_scissor_emitted is set. */ + late_scissor_emission = + radv_need_late_scissor_emission(cmd_buffer, info); + if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) radv_emit_framebuffer_state(cmd_buffer); @@ -3661,11 +4435,11 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER) radv_emit_index_buffer(cmd_buffer); } else { - /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE, + /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE, * so the state must be re-emitted before the next indexed * draw. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { cmd_buffer->state.last_index_type = -1; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; } @@ -3673,9 +4447,7 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, radv_cmd_buffer_flush_dynamic_state(cmd_buffer); - radv_emit_draw_registers(cmd_buffer, info->indexed, - info->instance_count > 1, info->indirect, - info->indirect ? 0 : info->count); + radv_emit_draw_registers(cmd_buffer, info); if (late_scissor_emission) radv_emit_scissor(cmd_buffer); @@ -3688,15 +4460,28 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, struct radeon_info *rad_info = &cmd_buffer->device->physical_device->rad_info; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; - MAYBE_UNUSED unsigned cdw_max = + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); + if (likely(!info->indirect)) { + /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is + * no workaround for indirect draws, but we can at least skip + * direct draws. + */ + if (unlikely(!info->instance_count)) + return; + + /* Handle count == 0. 
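+ * A direct draw with a zero vertex count has no effect, unless the + * count is sourced from transform feedback, so it can be skipped. 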
*/ + if (unlikely(!info->count && !info->strmout_buffer)) + return; + } + /* Use optimal packet order based on whether we need to sync the * pipeline. */ @@ -3848,55 +4633,6 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - -void radv_CmdDrawIndexedIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.indexed = true; - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - void radv_CmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer _buffer, @@ -3984,7 +4720,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25); + ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25); if (info->indirect) { uint64_t va = radv_buffer_get_va(info->indirect->bo); @@ -4066,7 +4802,6 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, } if (loc->sgpr_idx != -1) { - assert(!loc->indirect); assert(loc->num_sgprs == 3); radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + @@ -4113,7 +4848,7 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer, { struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = pipeline && pipeline != cmd_buffer->state.emitted_compute_pipeline; @@ -4234,20 +4969,16 @@ void radv_CmdEndRenderPass( radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) { - VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout; - radv_handle_subpass_image_transition(cmd_buffer, - (struct radv_subpass_attachment){i, layout}); - } + radv_cmd_buffer_end_subpass(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); cmd_buffer->state.pass = NULL; cmd_buffer->state.subpass = NULL; cmd_buffer->state.attachments = NULL; cmd_buffer->state.framebuffer = NULL; + cmd_buffer->state.subpass_sample_locs = NULL; } void radv_CmdEndRenderPass2KHR( @@ -4271,19 +5002,14 @@ static void 
radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, { assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); - unsigned layer_count = radv_get_layerCount(image, range); - uint64_t size = image->surface.htile_slice_size * layer_count; VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; - uint64_t offset = image->offset + image->htile_offset + - image->surface.htile_slice_size * range->baseArrayLayer; struct radv_cmd_state *state = &cmd_buffer->state; VkClearDepthStencilValue value = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset, - size, clear_word); + state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4308,15 +5034,21 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe VkImageLayout dst_layout, unsigned src_queue_mask, unsigned dst_queue_mask, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (!radv_image_has_htile(image)) return; - if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED && - radv_layout_has_htile(image, dst_layout, dst_queue_mask)) { - /* TODO: merge with the clear if applicable */ - radv_initialize_htile(cmd_buffer, image, range, 0); + if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { + uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; + + if (radv_layout_is_htile_compressed(image, dst_layout, + dst_queue_mask)) { + clear_value = 0; + } + + radv_initialize_htile(cmd_buffer, image, range, clear_value); } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 
0xfffff30f : 0xfffc000f; @@ -4331,7 +5063,8 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range); + radv_decompress_depth_image_inplace(cmd_buffer, image, + &local_range, sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4339,20 +5072,23 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe } static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, + uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image) + struct radv_image *image, + const VkImageSubresourceRange *range) { struct radv_cmd_state *state = &cmd_buffer->state; static const uint32_t fmask_clear_values[4] = { @@ -4367,20 +5103,50 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value); + state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value); + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value); + + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) { + /* When DCC is enabled with mipmaps, some levels might not + * support fast clears and we have to initialize them as "fully + * expanded". + */ + /* Compute the size of all fast clearable DCC levels. */ + for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[i]; + unsigned dcc_fast_clear_size = + surf_level->dcc_slice_fast_clear_size * image->info.array_size; + + if (!dcc_fast_clear_size) + break; + + size = surf_level->dcc_offset + dcc_fast_clear_size; + } + + /* Initialize the mipmap levels without DCC. 
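+ * 0xffffffff is the fully expanded mode, so these levels can never be + * considered fast cleared. 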
*/ + if (size != image->planes[0].surface.dcc_size) { + state->flush_bits |= + radv_fill_buffer(cmd_buffer, image->bo, + image->offset + image->dcc_offset + size, + image->planes[0].surface.dcc_size - size, + 0xffffffff); + } + } state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -4394,7 +5160,8 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, VkImageLayout src_layout, VkImageLayout dst_layout, unsigned src_queue_mask, - unsigned dst_queue_mask) + unsigned dst_queue_mask, + const VkImageSubresourceRange *range) { if (radv_image_has_cmask(image)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ @@ -4404,14 +5171,14 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, value = 0xccccccccu; } - radv_initialise_cmask(cmd_buffer, image, value); + radv_initialise_cmask(cmd_buffer, image, range, value); } if (radv_image_has_fmask(image)) { - radv_initialize_fmask(cmd_buffer, image); + radv_initialize_fmask(cmd_buffer, image, range); } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ bool need_decompress_pass = false; @@ -4421,15 +5188,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, need_decompress_pass = true; } - radv_initialize_dcc(cmd_buffer, image, value); + radv_initialize_dcc(cmd_buffer, image, range, value); - radv_update_fce_metadata(cmd_buffer, image, + radv_update_fce_metadata(cmd_buffer, image, range, need_decompress_pass); } - if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) { + if (radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t color_values[2] = {}; - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, range, + color_values); } } @@ -4447,13 +5216,14 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffe if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, src_layout, dst_layout, - src_queue_mask, dst_queue_mask); + src_queue_mask, dst_queue_mask, + range); return; } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { - radv_initialize_dcc(cmd_buffer, image, 0xffffffffu); + radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); @@ -4462,17 +5232,28 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffe radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { + bool fce_eliminate = false, fmask_expand = false; + if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { - radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + fce_eliminate = true; } if (radv_image_has_fmask(image)) { if (src_layout != VK_IMAGE_LAYOUT_GENERAL && dst_layout == VK_IMAGE_LAYOUT_GENERAL) { - radv_expand_fmask_image_inplace(cmd_buffer, image, range); + /* An FMASK decompress is required before doing + * an MSAA decompress using FMASK. 
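+ * The decompress is performed by radv_fast_clear_flush_image_inplace() + * below, before the FMASK expansion. 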
+ */ + fmask_expand = true; } } + + if (fce_eliminate || fmask_expand) + radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + + if (fmask_expand) + radv_expand_fmask_image_inplace(cmd_buffer, image, range); } } @@ -4482,7 +5263,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, VkImageLayout dst_layout, uint32_t src_family, uint32_t dst_family, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (image->exclusive && src_family != dst_family) { /* This is an acquire or a release operation and there will be @@ -4492,6 +5274,10 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, assert(src_family == cmd_buffer->queue_family_index || dst_family == cmd_buffer->queue_family_index); + if (src_family == VK_QUEUE_FAMILY_EXTERNAL || + src_family == VK_QUEUE_FAMILY_FOREIGN_EXT) + return; + if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) return; @@ -4501,6 +5287,9 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, return; } + if (src_layout == dst_layout) + return; + unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index); @@ -4512,7 +5301,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, radv_handle_depth_image_transition(cmd_buffer, image, src_layout, dst_layout, src_queue_mask, dst_queue_mask, - range); + range, sample_locs); } else { radv_handle_color_image_transition(cmd_buffer, image, src_layout, dst_layout, @@ -4525,6 +5314,7 @@ struct radv_barrier_info { uint32_t eventCount; const VkEvent *pEvents; VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; }; static void @@ -4547,7 +5337,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff); assert(cmd_buffer->cs->cdw <= cdw_max); @@ -4576,17 +5366,46 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, image); } - radv_stage_flush(cmd_buffer, info->srcStageMask); + /* The Vulkan spec 1.1.98 says: + * + * "An execution dependency with only + * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask + * will only prevent that stage from executing in subsequently + * submitted commands. As this stage does not perform any actual + * execution, this is not observable - in effect, it does not delay + * processing of subsequent commands. Similarly an execution dependency + * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask + * will effectively not wait for any prior commands to complete." 
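+ * + * Hence, a barrier whose destination is only BOTTOM_OF_PIPE can skip + * the stage flush; the cache flushes below are still emitted. 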
+ */ + if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) + radv_stage_flush(cmd_buffer, info->srcStageMask); cmd_buffer->state.flush_bits |= src_flush_bits; for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); + + const struct VkSampleLocationsInfoEXT *sample_locs_info = + vk_find_struct_const(pImageMemoryBarriers[i].pNext, + SAMPLE_LOCATIONS_INFO_EXT); + struct radv_sample_locations_state sample_locations = {}; + + if (sample_locs_info) { + assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT); + sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel; + sample_locations.grid_size = sample_locs_info->sampleLocationGridSize; + sample_locations.count = sample_locs_info->sampleLocationsCount; + typed_memcpy(&sample_locations.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, pImageMemoryBarriers[i].newLayout, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, - &pImageMemoryBarriers[i].subresourceRange); + &pImageMemoryBarriers[i].subresourceRange, + sample_locs_info ? &sample_locations : NULL); } /* Make sure CP DMA is idle because the driver might have performed a @@ -4617,6 +5436,7 @@ void radv_CmdPipelineBarrier( info.eventCount = 0; info.pEvents = NULL; info.srcStageMask = srcStageMask; + info.dstStageMask = destStageMask; radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, @@ -4636,7 +5456,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); /* Flags that only require a top-of-pipe event. */ VkPipelineStageFlags top_of_pipe_flags = @@ -4660,7 +5480,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, if (!(stageMask & ~top_of_pipe_flags)) { /* Just need to sync the PFP engine. */ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); @@ -4669,7 +5489,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, } else if (!(stageMask & ~post_index_fetch_flags)) { /* Sync ME because PFP reads index and indirect buffers. 
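+ * By the time the ME executes this write, the PFP has already fetched + * those buffers for all prior commands. 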
*/ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME)); radeon_emit(cs, va); @@ -4681,7 +5501,8 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->device->physical_device->rad_info.chip_class, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, - EOP_DATA_SEL_VALUE_32BIT, va, 2, value, + EOP_DST_SEL_MEM, + EOP_DATA_SEL_VALUE_32BIT, va, value, cmd_buffer->gfx9_eop_bug_va); } @@ -4746,8 +5567,11 @@ void radv_CmdBeginConditionalRenderingEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; bool draw_visible = true; - uint64_t va; + uint64_t pred_value = 0; + uint64_t va, new_va; + unsigned pred_offset; va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset; @@ -4761,13 +5585,53 @@ void radv_CmdBeginConditionalRenderingEXT( draw_visible = false; } + si_emit_cache_flush(cmd_buffer); + + /* From the Vulkan spec 1.1.107: + * + * "If the 32-bit value at offset in buffer memory is zero, then the + * rendering commands are discarded, otherwise they are executed as + * normal. If the value of the predicate in buffer memory changes while + * conditional rendering is active, the rendering commands may be + * discarded in an implementation-dependent way. Some implementations + * may latch the value of the predicate upon beginning conditional + * rendering while others may read it before every rendering command." + * + * But the AMD hardware treats the predicate as a 64-bit value which + * means we need a workaround in the driver. Luckily, we are not + * required to support the case where the value changes while + * predication is active. + * + * The workaround is as follows: + * 1) allocate a 64-bit value in the upload BO and initialize it to 0 + * 2) copy the 32-bit predicate value to the upload BO + * 3) use the newly allocated VA address for predication + * + * Based on the conditionalrender demo, it's faster to do the COPY_DATA + * in ME (+ sync PFP) instead of PFP. + */ + radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset); + + new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset; + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, new_va); + radeon_emit(cs, new_va >> 32); + + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); + radeon_emit(cs, 0); + /* Enable predication for this command buffer. */ - si_emit_set_predication_state(cmd_buffer, draw_visible, va); + si_emit_set_predication_state(cmd_buffer, draw_visible, new_va); cmd_buffer->state.predicating = true; /* Store conditional rendering user info. 
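+ * The resolved VA and visibility flag are kept so that predication + * can be re-enabled later with the same settings. 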
*/ cmd_buffer->state.predication_type = draw_visible; - cmd_buffer->state.predication_va = va; + cmd_buffer->state.predication_va = new_va; } void radv_CmdEndConditionalRenderingEXT( @@ -4811,7 +5675,7 @@ void radv_CmdBindTransformFeedbackBuffersEXT( enabled_mask |= 1 << idx; } - cmd_buffer->state.streamout.enabled_mask = enabled_mask; + cmd_buffer->state.streamout.enabled_mask |= enabled_mask; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER; } @@ -4831,6 +5695,8 @@ radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer) S_028B94_STREAMOUT_3_EN(so->streamout_enabled)); radeon_emit(cs, so->hw_enabled_mask & so->enabled_stream_buffers_mask); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -4858,7 +5724,7 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) unsigned reg_strmout_cntl; /* The register is at different places on different ASICs. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL; radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0); } else { @@ -4878,14 +5744,14 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) radeon_emit(cs, 4); /* poll interval */ } -void radv_CmdBeginTransformFeedbackEXT( - VkCommandBuffer commandBuffer, - uint32_t firstCounterBuffer, - uint32_t counterBufferCount, - const VkBuffer* pCounterBuffers, - const VkDeviceSize* pCounterBufferOffsets) +static void +radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) + { - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; @@ -4899,7 +5765,7 @@ void radv_CmdBeginTransformFeedbackEXT( if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) counter_buffer_idx = -1; - /* SI binds streamout buffers as shader resources. + /* AMD GCN binds streamout buffers as shader resources. * VGT only counts primitives and tells the shader through * SGPRs what to do. */ @@ -4907,6 +5773,8 @@ void radv_CmdBeginTransformFeedbackEXT( radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */ radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */ + cmd_buffer->state.context_roll_without_scissor_emitted = true; + if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) { /* The array of counter buffers is optional. 
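+ * Per the spec, when no counter buffer is provided for a binding, + * capture for that buffer starts at byte offset zero. 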
*/ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); @@ -4941,7 +5809,7 @@ void radv_CmdBeginTransformFeedbackEXT( radv_set_streamout_enable(cmd_buffer, true); } -void radv_CmdEndTransformFeedbackEXT( +void radv_CmdBeginTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, @@ -4949,6 +5817,19 @@ void radv_CmdEndTransformFeedbackEXT( const VkDeviceSize* pCounterBufferOffsets) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + radv_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); +} + +static void +radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radeon_cmdbuf *cs = cmd_buffer->cs; uint32_t i; @@ -4987,11 +5868,27 @@ void radv_CmdEndTransformFeedbackEXT( * that the primitives-emitted query won't increment. */ radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } radv_set_streamout_enable(cmd_buffer, false); } +void radv_CmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + radv_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); +} + void radv_CmdDrawIndirectByteCountEXT( VkCommandBuffer commandBuffer, uint32_t instanceCount, @@ -5013,3 +5910,39 @@ void radv_CmdDrawIndirectByteCountEXT( radv_draw(cmd_buffer, &info); } + +/* VK_AMD_buffer_marker */ +void radv_CmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset; + + si_emit_cache_flush(cmd_buffer); + + if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) { + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, marker); + radeon_emit(cs, 0); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + } else { + si_cs_emit_write_event_eop(cs, + cmd_buffer->device->physical_device->rad_info.chip_class, + radv_cmd_buffer_uses_mec(cmd_buffer), + V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DST_SEL_MEM, + EOP_DATA_SEL_VALUE_32BIT, + va, marker, + cmd_buffer->gfx9_eop_bug_va); + } +}