X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=215ccced1449a15af382d05ab1081f6e335f430d;hb=946193ae00832fd26d883a1df7d695e64db6b1a5;hp=2d6609887377a056a2875eeff139d7125b2fb858;hpb=87fbc16e3485306adfc625aafac1d04c58f7df22;p=mesa.git diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index 2d660988737..215ccced144 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -30,8 +30,8 @@ #include "radv_shader.h" #include "radv_cs.h" #include "sid.h" -#include "gfx9d.h" #include "vk_format.h" +#include "vk_util.h" #include "radv_debug.h" #include "radv_meta.h" @@ -58,7 +58,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, - VkImageAspectFlags pending_clears); + struct radv_sample_locations_state *sample_locs); const struct radv_dynamic_state default_dynamic_state = { .viewport = { @@ -106,6 +106,7 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, dest->viewport.count = src->viewport.count; dest->scissor.count = src->scissor.count; dest->discard_rectangle.count = src->discard_rectangle.count; + dest->sample_location.count = src->sample_location.count; if (copy_mask & RADV_DYNAMIC_VIEWPORT) { if (memcmp(&dest->viewport.viewports, &src->viewport.viewports, @@ -193,13 +194,46 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, } } + if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) { + if (dest->sample_location.per_pixel != src->sample_location.per_pixel || + dest->sample_location.grid_size.width != src->sample_location.grid_size.width || + dest->sample_location.grid_size.height != src->sample_location.grid_size.height || + memcmp(&dest->sample_location.locations, + &src->sample_location.locations, + src->sample_location.count * sizeof(VkSampleLocationEXT))) { + dest->sample_location.per_pixel = src->sample_location.per_pixel; + dest->sample_location.grid_size = src->sample_location.grid_size; + typed_memcpy(dest->sample_location.locations, + src->sample_location.locations, + src->sample_location.count); + dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS; + } + } + cmd_buffer->state.dirty |= dest_mask; } +static void +radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radv_shader_info *info; + + if (!pipeline->streamout_shader) + return; + + info = &pipeline->streamout_shader->info.info; + for (int i = 0; i < MAX_SO_BUFFERS; i++) + so->stride_in_dw[i] = info->so.strides[i]; + + so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask; +} + bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer) { return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE && - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; } enum ring_type radv_queue_family_to_ring(int f) { @@ -285,7 +319,6 @@ radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) static VkResult radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) { - cmd_buffer->device->ws->cs_reset(cmd_buffer->cs); list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, @@ -310,24 +343,29 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->record_result = VK_SUCCESS; + memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings)); + for (unsigned i = 
0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) { cmd_buffer->descriptors[i].dirty = 0; cmd_buffer->descriptors[i].valid = 0; cmd_buffer->descriptors[i].push_dirty = false; } - if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 && + cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) { unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends; - unsigned eop_bug_offset; + unsigned fence_offset, eop_bug_offset; void *fence_ptr; - radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, - &cmd_buffer->gfx9_fence_offset, + radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset, &fence_ptr); - cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo; + + cmd_buffer->gfx9_fence_va = + radv_buffer_get_va(cmd_buffer->upload.upload_bo); + cmd_buffer->gfx9_fence_va += fence_offset; /* Allocate a buffer for the EOP bug on GFX9. */ - radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0, + radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, &eop_bug_offset, &fence_ptr); cmd_buffer->gfx9_eop_bug_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -356,7 +394,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS| RADEON_FLAG_NO_INTERPROCESS_SHARING | - RADEON_FLAG_32BIT); + RADEON_FLAG_32BIT, + RADV_BO_PRIORITY_UPLOAD_BUFFER); if (!bo) { cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY; @@ -397,6 +436,8 @@ radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer, unsigned *out_offset, void **ptr) { + assert(util_is_power_of_two_nonzero(alignment)); + uint64_t offset = align(cmd_buffer->upload.offset, alignment); if (offset + size > cmd_buffer->upload.size) { if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size)) @@ -437,7 +478,7 @@ radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va, radeon_check_space(cmd_buffer->device->ws, cs, 4 + count); radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME)); radeon_emit(cs, va); @@ -470,24 +511,16 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer, enum radv_cmd_flush_bits flags) { if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) { - uint32_t *ptr = NULL; - uint64_t va = 0; - assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_CS_PARTIAL_FLUSH)); - if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { - va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + - cmd_buffer->gfx9_fence_offset; - ptr = &cmd_buffer->gfx9_fence_idx; - } - radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4); /* Force wait for graphics or compute engines to be idle. 
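		 *
		 * [Aside, not part of this patch: earlier in this hunk,
		 * radv_cmd_buffer_upload_alloc() gained
		 * assert(util_is_power_of_two_nonzero(alignment)) before its
		 * align() call. That align() is the usual power-of-two
		 * round-up; a minimal standalone sketch of the idiom
		 * (illustrative names, not radv's):
		 *
		 *     static uint64_t round_up_pow2(uint64_t v, uint64_t a)
		 *     {
		 *         assert(a != 0 && (a & (a - 1)) == 0); // power of two
		 *         return (v + a - 1) & ~(a - 1);
		 *     }
		 *
		 * round_up_pow2(9, 8) == 16 and round_up_pow2(8, 8) == 8; a
		 * zero or non-power-of-two alignment would silently corrupt
		 * upload offsets, hence the new assert.]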
*/ si_cs_emit_cache_flush(cmd_buffer->cs, cmd_buffer->device->physical_device->rad_info.chip_class, - ptr, va, + &cmd_buffer->gfx9_fence_idx, + cmd_buffer->gfx9_fence_va, radv_cmd_buffer_uses_mec(cmd_buffer), flags, cmd_buffer->gfx9_eop_bug_va); } @@ -551,8 +584,8 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer, for_each_bit(i, descriptors_state->valid) { struct radv_descriptor_set *set = descriptors_state->sets[i]; - data[i * 2] = (uintptr_t)set; - data[i * 2 + 1] = (uintptr_t)set >> 32; + data[i * 2] = (uint64_t)(uintptr_t)set; + data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32; } radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data); @@ -578,8 +611,7 @@ radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer, if (loc->sgpr_idx == -1) return; - assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2)); - assert(!loc->indirect); + assert(loc->num_sgprs == 1); radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, va, false); @@ -608,16 +640,215 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer, struct radv_userdata_info *loc = &locs->descriptor_sets[start]; unsigned sh_offset = sh_base + loc->sgpr_idx * 4; - radv_emit_shader_pointer_head(cs, sh_offset, count, - HAVE_32BIT_POINTERS); + radv_emit_shader_pointer_head(cs, sh_offset, count, true); for (int i = 0; i < count; i++) { struct radv_descriptor_set *set = descriptors_state->sets[start + i]; - radv_emit_shader_pointer_body(device, cs, set->va, - HAVE_32BIT_POINTERS); + radv_emit_shader_pointer_body(device, cs, set->va, true); + } + } +} + +/** + * Convert the user sample locations to hardware sample locations (the values + * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*). + */ +static void +radv_convert_user_sample_locs(struct radv_sample_locations_state *state, + uint32_t x, uint32_t y, VkOffset2D *sample_locs) +{ + uint32_t x_offset = x % state->grid_size.width; + uint32_t y_offset = y % state->grid_size.height; + uint32_t num_samples = (uint32_t)state->per_pixel; + VkSampleLocationEXT *user_locs; + uint32_t pixel_offset; + + pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples; + + assert(pixel_offset <= MAX_SAMPLE_LOCATIONS); + user_locs = &state->locations[pixel_offset]; + + for (uint32_t i = 0; i < num_samples; i++) { + float shifted_pos_x = user_locs[i].x - 0.5; + float shifted_pos_y = user_locs[i].y - 0.5; + + int32_t scaled_pos_x = floor(shifted_pos_x * 16); + int32_t scaled_pos_y = floor(shifted_pos_y * 16); + + sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7); + sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7); + } +} + +/** + * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample + * locations. + */ +static void +radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs, + uint32_t *sample_locs_pixel) +{ + for (uint32_t i = 0; i < num_samples; i++) { + uint32_t sample_reg_idx = i / 4; + uint32_t sample_loc_idx = i % 4; + int32_t pos_x = sample_locs[i].x; + int32_t pos_y = sample_locs[i].y; + + uint32_t shift_x = 8 * sample_loc_idx; + uint32_t shift_y = shift_x + 4; + + sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x; + sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y; + } +} + +/** + * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware + * sample locations. 
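 *
 * [Aside, not part of this patch: radv_convert_user_sample_locs() above
 * maps user locations in [0, 1) to signed 1/16th-pixel offsets from the
 * pixel centre, clamped to the 4-bit range [-8, 7]. Worked numbers:
 *
 *     user (0.5, 0.5)   -> shifted (0.0, 0.0)    -> hw (0, 0)
 *     user (0.75, 0.25) -> shifted (0.25, -0.25) -> hw (4, -4)
 *     user (1.0, 1.0)   -> shifted (0.5, 0.5)    -> hw (7, 7), clamped,
 *
 * since floor(0.5 * 16) = 8 does not fit the signed 4-bit range. The
 * nibble packing in radv_compute_sample_locs_pixel() then stores, for
 * sample j within each register, x in bits [8j, 8j+3] and y in bits
 * [8j+4, 8j+7].]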
+ */ +static uint64_t +radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer, + VkOffset2D *sample_locs, + uint32_t num_samples) +{ + uint32_t centroid_priorities[num_samples]; + uint32_t sample_mask = num_samples - 1; + uint32_t distances[num_samples]; + uint64_t centroid_priority = 0; + + /* Compute the distances from center for each sample. */ + for (int i = 0; i < num_samples; i++) { + distances[i] = (sample_locs[i].x * sample_locs[i].x) + + (sample_locs[i].y * sample_locs[i].y); + } + + /* Compute the centroid priorities by looking at the distances array. */ + for (int i = 0; i < num_samples; i++) { + uint32_t min_idx = 0; + + for (int j = 1; j < num_samples; j++) { + if (distances[j] < distances[min_idx]) + min_idx = j; } + + centroid_priorities[i] = min_idx; + distances[min_idx] = 0xffffffff; + } + + /* Compute the final centroid priority. */ + for (int i = 0; i < 8; i++) { + centroid_priority |= + centroid_priorities[i & sample_mask] << (i * 4); + } + + return centroid_priority << 32 | centroid_priority; +} + +/** + * Emit the sample locations that are specified with VK_EXT_sample_locations. + */ +static void +radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; + struct radv_multisample_state *ms = &pipeline->graphics.ms; + struct radv_sample_locations_state *sample_location = + &cmd_buffer->state.dynamic.sample_location; + uint32_t num_samples = (uint32_t)sample_location->per_pixel; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t sample_locs_pixel[4][2] = {}; + VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */ + uint32_t max_sample_dist = 0; + uint64_t centroid_priority; + + if (!cmd_buffer->state.dynamic.sample_location.count) + return; + + /* Convert the user sample locations to hardware sample locations. */ + radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]); + radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]); + radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]); + radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]); + + /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */ + for (uint32_t i = 0; i < 4; i++) { + radv_compute_sample_locs_pixel(num_samples, sample_locs[i], + sample_locs_pixel[i]); + } + + /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */ + centroid_priority = + radv_compute_centroid_priority(cmd_buffer, sample_locs[0], + num_samples); + + /* Compute the maximum sample distance from the specified locations. */ + for (uint32_t i = 0; i < num_samples; i++) { + VkOffset2D offset = sample_locs[0][i]; + max_sample_dist = MAX2(max_sample_dist, + MAX2(abs(offset.x), abs(offset.y))); + } + + /* Emit the specified user sample locations. 
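	 *
	 * [Aside, not part of this patch: worked example for
	 * radv_compute_centroid_priority() above with num_samples = 4.
	 * If the squared distances come out as {32, 2, 8, 18}, selecting
	 * the minimum four times yields the order {1, 2, 3, 0}. Replicated
	 * through (i & sample_mask) over eight 4-bit fields this gives a
	 * low dword of 0x03210321, and the function returns
	 * 0x0321032103210321, so sample 1 (closest to the pixel centre)
	 * receives the highest centroid priority.]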
*/ + switch (num_samples) { + case 2: + case 4: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + break; + case 8: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]); + radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]); + radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]); + radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]); + break; + default: + unreachable("invalid number of samples"); + } + + /* Emit the maximum sample distance and the centroid priority. */ + uint32_t pa_sc_aa_config = ms->pa_sc_aa_config; + + pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST; + pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist); + + radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1); + radeon_emit(cs, pa_sc_aa_config); + + radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2); + radeon_emit(cs, centroid_priority); + radeon_emit(cs, centroid_priority >> 32); + + /* GFX9: Flush DFSM when the AA mode changes. */ + if (cmd_buffer->device->dfsm_allowed) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + +static void +radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer, + struct radv_pipeline *pipeline, + gl_shader_stage stage, + int idx, int count, uint32_t *values) +{ + struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx); + uint32_t base_reg = pipeline->user_data_0[stage]; + if (loc->sgpr_idx == -1) + return; + + assert(loc->num_sgprs == count); + + radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count); + radeon_emit_array(cmd_buffer->cs, values, count); } static void @@ -640,13 +871,15 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); - radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples); + radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); /* GFX9: Flush DFSM when the AA mode changes. 
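	 *
	 * [Aside, not part of this patch: radv_emit_inline_push_consts(),
	 * added above, loads small push-constant values straight into user
	 * SGPRs with SET_SH_REG instead of going through the upload buffer.
	 * A hypothetical call site, assuming a shader compiled with two
	 * inline dwords:
	 *
	 *     uint32_t vals[2] = {0x3f800000, 16}; // 0x3f800000 == 1.0f
	 *     radv_emit_inline_push_consts(cmd_buffer, pipeline,
	 *                                  MESA_SHADER_FRAGMENT,
	 *                                  AC_UD_INLINE_PUSH_CONSTANTS,
	 *                                  2, vals);
	 *
	 * the real caller is radv_flush_constants() later in this patch,
	 * which passes base_inline_push_consts / num_inline_push_consts
	 * from the shader info.]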
*/ if (cmd_buffer->device->dfsm_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -843,10 +1076,13 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4); sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4); } + /* TODO: avoid redundantly setting context registers */ radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3); radeon_emit(cmd_buffer->cs, sx_ps_downconvert); radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon); radeon_emit(cmd_buffer->cs, sx_blend_opt_control); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -870,6 +1106,15 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw); + if (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw || + cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash || + memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf, + pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) { + radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw); + cmd_buffer->state.context_roll_without_scissor_emitted = true; + } + for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) { if (!pipeline->shaders[i]) continue; @@ -906,6 +1151,8 @@ radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.dynamic.scissor.scissors, cmd_buffer->state.dynamic.viewport.viewports, cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband); + + cmd_buffer->state.context_roll_without_scissor_emitted = false; } static void @@ -993,12 +1240,13 @@ static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, struct radv_attachment_info *att, - struct radv_image *image, + struct radv_image_view *iview, VkImageLayout layout) { - bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI; + bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; + struct radv_image *image = iview->image; if (!radv_layout_dcc_compressed(image, layout, radv_image_queue_family_mask(image, @@ -1007,6 +1255,15 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_DCC_ENABLE; } + if (radv_image_is_tc_compat_cmask(image) && + (radv_is_fmask_decompress_pipeline(cmd_buffer) || + radv_is_dcc_decompress_pipeline(cmd_buffer))) { + /* If this bit is set, the FMASK decompression operation + * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS). 
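		 *
		 * [Aside, not part of this patch: radv_emit_graphics_pipeline()
		 * earlier in this hunk now re-emits the pipeline's context
		 * registers (ctx_cs) only when they actually changed, with
		 * emitted = cmd_buffer->state.emitted_pipeline:
		 *
		 *     if (!emitted ||
		 *         emitted->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
		 *         emitted->ctx_cs_hash != pipeline->ctx_cs_hash ||
		 *         memcmp(emitted->ctx_cs.buf, pipeline->ctx_cs.buf,
		 *                pipeline->ctx_cs.cdw * 4) != 0)
		 *             // re-emit ctx_cs and note the context roll
		 *
		 * the hash is a cheap first-level filter and the memcmp
		 * catches collisions; skipping the write avoids a context
		 * roll, which is why this patch tracks
		 * context_roll_without_scissor_emitted in so many places.]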
+ */ + cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; + } + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1026,7 +1283,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32)); radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4, - S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch)); + cb->cb_mrt_epitch); } else { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1045,13 +1302,26 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base); } } + + if (radv_dcc_enabled(image, iview->base_mip)) { + /* Drawing with DCC enabled also compresses colorbuffers. */ + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + radv_update_dcc_metadata(cmd_buffer, image, &range, true); + } } static void radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, struct radv_ds_buffer_info *ds, struct radv_image *image, VkImageLayout layout, - bool requires_cond_write) + bool requires_cond_exec) { uint32_t db_z_info = ds->db_z_info; uint32_t db_z_info_reg; @@ -1075,38 +1345,21 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, } /* When we don't know the last fast clear value we need to emit a - * conditional packet, otherwise we can update DB_Z_INFO directly. + * conditional packet that will eventually skip the following + * SET_CONTEXT_REG packet. */ - if (requires_cond_write) { - radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0)); - - const uint32_t write_space = 0 << 8; /* register */ - const uint32_t poll_space = 1 << 4; /* memory */ - const uint32_t function = 3 << 0; /* equal to the reference */ - const uint32_t options = write_space | poll_space | function; - radeon_emit(cmd_buffer->cs, options); - - /* poll address - location of the depth clear value */ + if (requires_cond_exec) { uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->clear_value_offset; - - /* In presence of stencil format, we have to adjust the base - * address because the first value is the stencil clear value. 
- */ - if (vk_format_is_stencil(image->vk_format)) - va += 4; + va += image->offset + image->tc_compat_zrange_offset; + radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - - radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */ - radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */ - radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */ - radeon_emit(cmd_buffer->cs, 0u); /* write address high */ - radeon_emit(cmd_buffer->cs, db_z_info); - } else { - radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info); + radeon_emit(cmd_buffer->cs, 0); + radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */ } + + radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info); } static void @@ -1193,10 +1446,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, if (!framebuffer || !subpass) return; - att_idx = subpass->depth_stencil_attachment.attachment; - if (att_idx == VK_ATTACHMENT_UNUSED) + if (!subpass->depth_stencil_attachment) return; + att_idx = subpass->depth_stencil_attachment->attachment; att = &framebuffer->attachments[att_idx]; if (att->attachment->image != image) return; @@ -1210,11 +1463,13 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, */ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && ds_clear_value.depth == 0.0) { - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + VkImageLayout layout = subpass->depth_stencil_attachment->layout; radv_update_zrange_precision(cmd_buffer, &att->ds, image, layout, false); } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } /** @@ -1241,8 +1496,8 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); @@ -1253,6 +1508,44 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cs, fui(ds_clear_value.depth)); } +/** + * Update the TC-compat metadata value for this image. + */ +static void +radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, + struct radv_image *image, + uint32_t value) +{ + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint64_t va = radv_buffer_get_va(image->bo); + va += image->offset + image->tc_compat_zrange_offset; + + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, value); +} + +static void +radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, + struct radv_image *image, + VkClearDepthStencilValue ds_clear_value) +{ + uint64_t va = radv_buffer_get_va(image->bo); + va += image->offset + image->tc_compat_zrange_offset; + uint32_t cond_val; + + /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last + * depth clear value is 0.0f. + */ + cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0; + + radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val); +} + /** * Update the clear depth/stencil values for this image. 
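 *
 * [Aside, not part of this patch: the TC-compat zrange scheme above works
 * in two halves. radv_update_tc_compat_zrange_metadata() stores UINT_MAX
 * in the image's tc_compat_zrange metadata word when the last clear depth
 * was 0.0f, and 0 otherwise. radv_update_zrange_precision() then emits
 *
 *     PKT3(PKT3_COND_EXEC, 3, 0)
 *     va, va >> 32, 0
 *     3   // dwords covered, i.e. one SET_CONTEXT_REG packet
 *
 * so the SET_CONTEXT_REG that follows, which (in the full function)
 * rewrites DB_Z_INFO with ZRANGE_PRECISION cleared for the 0.0f case, is
 * skipped whenever the predicate word is zero. This replaces the old
 * COND_WRITE, which had to encode the register write inside the packet
 * itself.]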
*/ @@ -1266,6 +1559,12 @@ radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects); + if (radv_image_is_tc_compat_htile(image) && + (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { + radv_update_tc_compat_zrange_metadata(cmd_buffer, image, + ds_clear_value); + } + radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value, aspects); } @@ -1296,17 +1595,27 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); - radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | - COPY_DATA_DST_SEL(COPY_DATA_REG) | - (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0)); - radeon_emit(cs, va); - radeon_emit(cs, va >> 32); - radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2); - radeon_emit(cs, 0); + uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); - radeon_emit(cs, 0); + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { + radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); + radeon_emit(cs, reg_count); + } else { + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_REG) | + (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, reg >> 2); + radeon_emit(cs, 0); + + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); + radeon_emit(cs, 0); + } } /* @@ -1315,24 +1624,56 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, * cmask eliminate is required. */ void -radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, - bool value) +radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->dcc_pred_offset; + uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; + + assert(radv_dcc_enabled(image, range->baseMipLevel)); + + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); + radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cmd_buffer->cs, va); + radeon_emit(cmd_buffer->cs, va >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } +} + +/** + * Update the DCC predicate to reflect the compression state. 
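 *
 * [Aside, not part of this patch: radv_load_ds_clear_metadata() above now
 * prefers the dedicated context-register load packet when the firmware
 * supports it (has_load_ctx_reg_pkt):
 *
 *     PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)
 *     va, va >> 32, (reg - SI_CONTEXT_REG_OFFSET) >> 2, reg_count
 *
 * The fallback is a COPY_DATA from memory into the register file plus a
 * PFP_SYNC_ME; COPY_DATA executes on the ME, so without the sync the PFP
 * could race ahead of the register load. The LOAD_CONTEXT_REG path needs
 * no such fence.]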
+ */ +void +radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer, + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) +{ + uint64_t pred_val = value; + uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); - radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); + radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1364,6 +1705,8 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2); radeon_emit(cs, color_values[0]); radeon_emit(cs, color_values[1]); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } /** @@ -1372,23 +1715,28 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, static void radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t color_values[2]) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, color_values[0]); - radeon_emit(cs, color_values[1]); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cs, color_values[0]); + radeon_emit(cs, color_values[1]); + } } /** @@ -1396,13 +1744,23 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, int cb_idx, uint32_t color_values[2]) { - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + struct radv_image *image = iview->image; + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, iview->base_mip)); - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values); radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx, color_values); @@ 
-1413,30 +1771,38 @@ radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + struct radv_image_view *iview, int cb_idx) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + struct radv_image *image = iview->image; + uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip); - if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image)) + if (!radv_image_has_cmask(image) && + !radv_dcc_enabled(image, iview->base_mip)) return; uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating)); - radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | - COPY_DATA_DST_SEL(COPY_DATA_REG) | - COPY_DATA_COUNT_SEL); - radeon_emit(cs, va); - radeon_emit(cs, va >> 32); - radeon_emit(cs, reg >> 2); - radeon_emit(cs, 0); + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { + radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); + radeon_emit(cs, 2); + } else { + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_REG) | + COPY_DATA_COUNT_SEL); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, reg >> 2); + radeon_emit(cs, 0); - radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating)); - radeon_emit(cs, 0); + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating)); + radeon_emit(cs, 0); + } } static void @@ -1445,6 +1811,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int i; struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; + unsigned num_bpp64_colorbufs = 0; /* this may happen for inherited secondary recording */ if (!framebuffer) @@ -1459,20 +1826,25 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int idx = subpass->color_attachments[i].attachment; struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; + struct radv_image_view *iview = att->attachment; + struct radv_image *image = iview->image; VkImageLayout layout = subpass->color_attachments[i].layout; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT); - radv_emit_fb_color_state(cmd_buffer, i, att, image, layout); + assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); + radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); - radv_load_color_clear_metadata(cmd_buffer, image, i); + radv_load_color_clear_metadata(cmd_buffer, iview, i); + + if (image->planes[0].surface.bpe >= 8) + num_bpp64_colorbufs++; } - if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - int idx = subpass->depth_stencil_attachment.attachment; - VkImageLayout layout = subpass->depth_stencil_attachment.layout; + if (subpass->depth_stencil_attachment) { + int idx = subpass->depth_stencil_attachment->attachment; + VkImageLayout layout = 
subpass->depth_stencil_attachment->layout; struct radv_attachment_info *att = &framebuffer->attachments[idx]; struct radv_image *image = att->attachment->image; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); @@ -1503,6 +1875,23 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_X(framebuffer->width) | S_028208_BR_Y(framebuffer->height)); + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { + uint8_t watermark = 4; /* Default value for GFX8. */ + + /* For optimal DCC performance. */ + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + if (num_bpp64_colorbufs >= 5) { + watermark = 8; + } else { + watermark = 6; + } + } + + radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL, + S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) | + S_028424_OVERWRITE_COMBINER_WATERMARK(watermark)); + } + if (cmd_buffer->device->dfsm_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); @@ -1548,7 +1937,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) uint32_t db_count_control; if(!cmd_buffer->state.active_occlusion_queries) { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) && pipeline->graphics.disable_out_of_order_rast_for_occlusion && has_perfect_queries) { @@ -1567,7 +1956,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | S_028004_SAMPLE_RATE(sample_rate) | @@ -1596,6 +1985,8 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) } radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; } static void @@ -1630,6 +2021,9 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE) radv_emit_discard_rectangle(cmd_buffer); + if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) + radv_emit_sample_locations(cmd_buffer); + cmd_buffer->state.dirty &= ~states; } @@ -1657,7 +2051,7 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer, { struct radv_descriptor_state *descriptors_state = radv_get_descriptors_state(cmd_buffer, bind_point); - uint32_t size = MAX_SETS * 2 * 4; + uint32_t size = MAX_SETS * 4; uint32_t offset; void *ptr; @@ -1666,13 +2060,12 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer, return; for (unsigned i = 0; i < MAX_SETS; i++) { - uint32_t *uptr = ((uint32_t *)ptr) + i * 2; + uint32_t *uptr = ((uint32_t *)ptr) + i; uint64_t set_va = 0; struct radv_descriptor_set *set = descriptors_state->sets[i]; if (descriptors_state->valid & (1u << i)) set_va = set->va; uptr[0] = set_va & 0xffffffff; - uptr[1] = set_va >> 32; } uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -1714,6 +2107,8 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS; struct 
radv_descriptor_state *descriptors_state = radv_get_descriptors_state(cmd_buffer, bind_point); + struct radv_cmd_state *state = &cmd_buffer->state; + bool flush_indirect_descriptors; if (!descriptors_state->dirty) return; @@ -1721,10 +2116,14 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, if (descriptors_state->push_dirty) radv_flush_push_descriptors(cmd_buffer, bind_point); - if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) || - (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) { + flush_indirect_descriptors = + (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS && + state->pipeline && state->pipeline->need_indirect_descriptor_sets) || + (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE && + state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets); + + if (flush_indirect_descriptors) radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point); - } MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, @@ -1772,6 +2171,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, radv_get_descriptors_state(cmd_buffer, bind_point); struct radv_pipeline_layout *layout = pipeline->layout; struct radv_shader_variant *shader, *prev_shader; + bool need_push_constants = false; unsigned offset; void *ptr; uint64_t va; @@ -1781,37 +2181,56 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, (!layout->push_constant_size && !layout->dynamic_offset_count)) return; - if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + - 16 * layout->dynamic_offset_count, - 256, &offset, &ptr)) - return; + radv_foreach_stage(stage, stages) { + if (!pipeline->shaders[stage]) + continue; - memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); - memcpy((char*)ptr + layout->push_constant_size, - descriptors_state->dynamic_buffers, - 16 * layout->dynamic_offset_count); + need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants; + need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets; - va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); - va += offset; + uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts; + uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts; - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, MESA_SHADER_STAGES * 4); + radv_emit_inline_push_consts(cmd_buffer, pipeline, stage, + AC_UD_INLINE_PUSH_CONSTANTS, + count, + (uint32_t *)&cmd_buffer->push_constants[base * 4]); + } - prev_shader = NULL; - radv_foreach_stage(stage, stages) { - shader = radv_get_shader(pipeline, stage); + if (need_push_constants) { + if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + + 16 * layout->dynamic_offset_count, + 256, &offset, &ptr)) + return; + + memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); + memcpy((char*)ptr + layout->push_constant_size, + descriptors_state->dynamic_buffers, + 16 * layout->dynamic_offset_count); - /* Avoid redundantly emitting the address for merged stages. 
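		 *
		 * [Aside, not part of this patch: when any stage still reads
		 * push constants from memory (need_push_constants), the slice
		 * uploaded above is laid out as
		 *
		 *     [0, push_constant_size)            raw push constants
		 *     [push_constant_size, +16 * n_dyn)  dynamic buffer descriptors
		 *
		 * with 16 bytes per dynamic offset because each entry is a full
		 * 4-dword buffer descriptor. Every such stage then receives the
		 * same base address in its AC_UD_PUSH_CONSTANTS user SGPR,
		 * while shaders whose constants all fit in the inline path
		 * skip the upload entirely.]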
*/ - if (shader && shader != prev_shader) { - radv_emit_userdata_address(cmd_buffer, pipeline, stage, - AC_UD_PUSH_CONSTANTS, va); + va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); + va += offset; + + MAYBE_UNUSED unsigned cdw_max = + radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, MESA_SHADER_STAGES * 4); + + prev_shader = NULL; + radv_foreach_stage(stage, stages) { + shader = radv_get_shader(pipeline, stage); - prev_shader = shader; + /* Avoid redundantly emitting the address for merged stages. */ + if (shader && shader != prev_shader) { + radv_emit_userdata_address(cmd_buffer, pipeline, stage, + AC_UD_PUSH_CONSTANTS, va); + + prev_shader = shader; + } } + assert(cmd_buffer->cs->cdw <= cdw_max); } cmd_buffer->push_constant_stages &= ~stages; - assert(cmd_buffer->cs->cdw <= cdw_max); } static void @@ -1820,13 +2239,13 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, { if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && - cmd_buffer->state.pipeline->vertex_elements.count && + cmd_buffer->state.pipeline->num_vertex_bindings && radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; unsigned vb_offset; void *vb_ptr; uint32_t i = 0; - uint32_t count = velems->count; + uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings; uint64_t va; /* allocate some descriptor state for vertex buffers */ @@ -1837,21 +2256,28 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, for (i = 0; i < count; i++) { uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; uint32_t offset; - int vb = velems->binding[i]; - struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer; - uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; + struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; + uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + + if (!buffer) + continue; va = radv_buffer_get_va(buffer->bo); - offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i]; + offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride) + if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; else desc[2] = buffer->size - offset; - desc[3] = velems->rsrc_word3[i]; + desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | + S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -1868,17 +2294,153 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, } static void -radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty) +radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va) { - radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty); - radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); - radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); + struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; + struct radv_userdata_info *loc; 
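	/* [Aside, not part of this patch: each bound vertex buffer above is
	 * turned into a standard 4-dword buffer descriptor (a V#);
	 * schematically:
	 *
	 *     desc[0] = va;                          // base address, low bits
	 *     desc[1] = BASE_ADDRESS_HI(va >> 32) | STRIDE(stride);
	 *     desc[2] = num_records;                 // see below
	 *     desc[3] = DST_SEL_XYZW | NUM_FORMAT_UINT | DATA_FORMAT_32;
	 *
	 * On GFX7 and older, num_records is derived from the stride
	 * ((size - offset - format_size) / stride + 1), while newer chips
	 * take the plain byte size, which is what the chip_class check in
	 * radv_flush_vertex_descriptors() above implements.]
	 */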
+ uint32_t base_reg; + + for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) { + if (!radv_get_shader(pipeline, stage)) + continue; + + loc = radv_lookup_user_sgpr(pipeline, stage, + AC_UD_STREAMOUT_BUFFERS); + if (loc->sgpr_idx == -1) + continue; + + base_reg = pipeline->user_data_0[stage]; + + radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, + base_reg + loc->sgpr_idx * 4, va, false); + } + + if (pipeline->gs_copy_shader) { + loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS]; + if (loc->sgpr_idx != -1) { + base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0; + + radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, + base_reg + loc->sgpr_idx * 4, va, false); + } + } } static void -radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, - bool instanced_draw, bool indirect_draw, - uint32_t draw_vertex_count) +radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) +{ + if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) { + struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + unsigned so_offset; + void *so_ptr; + uint64_t va; + + /* Allocate some descriptor state for streamout buffers. */ + if (!radv_cmd_buffer_upload_alloc(cmd_buffer, + MAX_SO_BUFFERS * 16, 256, + &so_offset, &so_ptr)) + return; + + for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) { + struct radv_buffer *buffer = sb[i].buffer; + uint32_t *desc = &((uint32_t *)so_ptr)[i * 4]; + + if (!(so->enabled_mask & (1 << i))) + continue; + + va = radv_buffer_get_va(buffer->bo) + buffer->offset; + + va += sb[i].offset; + + /* Set the descriptor. + * + * On GFX8, the format must be non-INVALID, otherwise + * the buffer will be considered not bound and store + * instructions will be no-ops. + */ + desc[0] = va; + desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32); + desc[2] = 0xffffffff; + desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + } + + va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); + va += so_offset; + + radv_emit_streamout_buffers(cmd_buffer, va); + } + + cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER; +} + +static void +radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty) +{ + radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty); + radv_flush_streamout_descriptors(cmd_buffer); + radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); + radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); +} + +struct radv_draw_info { + /** + * Number of vertices. + */ + uint32_t count; + + /** + * Index of the first vertex. + */ + int32_t vertex_offset; + + /** + * First instance id. + */ + uint32_t first_instance; + + /** + * Number of instances. + */ + uint32_t instance_count; + + /** + * First index (indexed draws only). + */ + uint32_t first_index; + + /** + * Whether it's an indexed draw. + */ + bool indexed; + + /** + * Indirect draw parameters resource. + */ + struct radv_buffer *indirect; + uint64_t indirect_offset; + uint32_t stride; + + /** + * Draw count parameters resource. + */ + struct radv_buffer *count_buffer; + uint64_t count_buffer_offset; + + /** + * Stream output parameters resource. 
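	 *
	 * [Aside, not part of this patch: strmout_buffer backs
	 * vkCmdDrawIndirectByteCountEXT. In radv_emit_draw_registers()
	 * below, the CP copies the byte count written by a previous
	 * transform-feedback pass into
	 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE and programs
	 * VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE from the draw, so the
	 * hardware itself derives the vertex count; conceptually, 1024
	 * filled bytes at a 16-byte vertex stride launch a 64-vertex draw
	 * with no CPU readback of the counter buffer.]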
+ */ + struct radv_buffer *strmout_buffer; + uint64_t strmout_buffer_offset; +}; + +static void +radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, + const struct radv_draw_info *draw_info) { struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; struct radv_cmd_state *state = &cmd_buffer->state; @@ -1888,15 +2450,17 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, /* Draw state. */ ia_multi_vgt_param = - si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, - indirect_draw, draw_vertex_count); + si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, + draw_info->indirect, + !!draw_info->strmout_buffer, + draw_info->indirect ? 0 : draw_info->count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { if (info->chip_class >= GFX9) { radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); - } else if (info->chip_class >= CIK) { + } else if (info->chip_class >= GFX7) { radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param); @@ -1909,7 +2473,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, /* Primitive restart. */ primitive_reset_en = - indexed_draw && state->pipeline->graphics.prim_restart_enable; + draw_info->indexed && state->pipeline->graphics.prim_restart_enable; if (primitive_reset_en != state->last_primitive_reset_en) { state->last_primitive_reset_en = primitive_reset_en; @@ -1935,6 +2499,27 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw, state->last_primitive_reset_index = primitive_reset_index; } } + + if (draw_info->strmout_buffer) { + uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo); + + va += draw_info->strmout_buffer->offset + + draw_info->strmout_buffer_offset; + + radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, + draw_info->stride); + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_REG) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); + radeon_emit(cs, 0); /* unused */ + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo); + } } static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer, @@ -1961,7 +2546,8 @@ static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | - VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) { + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) { cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH; } } @@ -1985,6 +2571,8 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, for_each_bit(b, src_flags) { switch ((VkAccessFlagBits)(1 << b)) { case VK_ACCESS_SHADER_WRITE_BIT: + case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT: + case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; break; case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: @@ -2054,6 +2642,7 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, switch ((VkAccessFlagBits)(1 << b)) { case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: case VK_ACCESS_INDEX_READ_BIT: + case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: break; case VK_ACCESS_UNIFORM_READ_BIT: flush_bits |= 
RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
@@ -2099,11 +2688,67 @@ void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
 			     NULL);
 }
 
+uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+	/* The id of this subpass shouldn't exceed the number of subpasses in
+	 * this render pass minus 1.
+	 */
+	assert(subpass_id < state->pass->subpass_count);
+	return subpass_id;
+}
+
+static struct radv_sample_locations_state *
+radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
+				     uint32_t att_idx,
+				     bool begin_subpass)
+{
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+	struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment;
+
+	if (view->image->info.samples == 1)
+		return NULL;
+
+	if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
+		/* Return the initial sample locations if this is the initial
+		 * layout transition of the given subpass attachment.
+		 */
+		if (state->attachments[att_idx].sample_location.count > 0)
+			return &state->attachments[att_idx].sample_location;
+	} else {
+		/* Otherwise return the subpass sample locations if defined. */
+		if (state->subpass_sample_locs) {
+			/* Because the driver sets the current subpass before
+			 * initial layout transitions, we should use the sample
+			 * locations from the previous subpass to avoid an
+			 * off-by-one problem. Otherwise, use the sample
+			 * locations for the current subpass for final layout
+			 * transitions.
+			 */
+			if (begin_subpass)
+				subpass_id--;
+
+			for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
+				if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
+					return &state->subpass_sample_locs[i].sample_location;
+			}
+		}
+	}
+
+	return NULL;
+}
+
 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
-					       struct radv_subpass_attachment att)
+					       struct radv_subpass_attachment att,
+					       bool begin_subpass)
 {
 	unsigned idx = att.attachment;
 	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
+	struct radv_sample_locations_state *sample_locs;
 	VkImageSubresourceRange range;
 	range.aspectMask = 0;
 	range.baseMipLevel = view->base_mip;
@@ -2111,11 +2756,27 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buf
 	range.baseArrayLayer = view->base_layer;
 	range.layerCount = cmd_buffer->state.framebuffer->layers;
 
+	if (cmd_buffer->state.subpass->view_mask) {
+		/* If the current subpass uses multiview, the driver might have
+		 * performed a fast color/depth clear to the whole image
+		 * (including all layers). To make sure the driver will
+		 * decompress the image correctly (if needed), we have to
+		 * account for the "real" number of layers. If the view mask is
+		 * sparse, this will decompress more layers than needed.
+		 */
+		range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
+	}
+
+	/* Get the subpass sample locations for the given attachment; if NULL
+	 * is returned, the driver will use the default HW locations. 
+ */ + sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, + begin_subpass); + radv_handle_image_transition(cmd_buffer, view->image, cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range, - cmd_buffer->state.attachments[idx].pending_clear_aspects); + att.layout, 0, 0, &range, sample_locs); cmd_buffer->state.attachments[idx].current_layout = att.layout; @@ -2124,31 +2785,94 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf void radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, - const struct radv_subpass *subpass, bool transitions) + const struct radv_subpass *subpass) { - if (transitions) { - radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + cmd_buffer->state.subpass = subpass; - for (unsigned i = 0; i < subpass->color_count; ++i) { - if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) - radv_handle_subpass_image_transition(cmd_buffer, - subpass->color_attachments[i]); - } + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; +} - for (unsigned i = 0; i < subpass->input_count; ++i) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->input_attachments[i]); - } +static VkResult +radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, + struct radv_render_pass *pass, + const VkRenderPassBeginInfo *info) +{ + const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs = + vk_find_struct_const(info->pNext, + RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_framebuffer *framebuffer = state->framebuffer; - if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { - radv_handle_subpass_image_transition(cmd_buffer, - subpass->depth_stencil_attachment); - } + if (!sample_locs) { + state->subpass_sample_locs = NULL; + return VK_SUCCESS; } - cmd_buffer->state.subpass = subpass; + for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) { + const VkAttachmentSampleLocationsEXT *att_sample_locs = + &sample_locs->pAttachmentInitialSampleLocations[i]; + uint32_t att_idx = att_sample_locs->attachmentIndex; + struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; + struct radv_image *image = att->attachment->image; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; + assert(vk_format_is_depth_or_stencil(image->vk_format)); + + /* From the Vulkan spec 1.1.108: + * + * "If the image referenced by the framebuffer attachment at + * index attachmentIndex was not created with + * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT + * then the values specified in sampleLocationsInfo are + * ignored." 
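	 *
	 * [Aside, not part of this patch: for reference, the application-side
	 * structure parsed here, filled for a hypothetical 4x MSAA, 1x1 grid
	 * case with the standard sample positions:
	 *
	 *     VkSampleLocationEXT locs[4] = {
	 *         {0.375f, 0.125f}, {0.875f, 0.375f},
	 *         {0.125f, 0.625f}, {0.625f, 0.875f},
	 *     };
	 *     VkSampleLocationsInfoEXT info = {
	 *         .sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
	 *         .sampleLocationsPerPixel = VK_SAMPLE_COUNT_4_BIT,
	 *         .sampleLocationGridSize = {1, 1},
	 *         .sampleLocationsCount = 4,
	 *         .pSampleLocations = locs,
	 *     };
	 *
	 * sampleLocationsCount must equal the per-pixel count times grid
	 * width times grid height, which is exactly what the typed_memcpy
	 * below copies into the attachment state.]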
+ */ + if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) + continue; + + const VkSampleLocationsInfoEXT *sample_locs_info = + &att_sample_locs->sampleLocationsInfo; + + state->attachments[att_idx].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->attachments[att_idx].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->attachments[att_idx].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->attachments[att_idx].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc, + sample_locs->postSubpassSampleLocationsCount * + sizeof(state->subpass_sample_locs[0]), + 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (state->subpass_sample_locs == NULL) { + cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY; + return cmd_buffer->record_result; + } + + state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount; + + for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) { + const VkSubpassSampleLocationsEXT *subpass_sample_locs_info = + &sample_locs->pPostSubpassSampleLocations[i]; + const VkSampleLocationsInfoEXT *sample_locs_info = + &subpass_sample_locs_info->sampleLocationsInfo; + + state->subpass_sample_locs[i].subpass_idx = + subpass_sample_locs_info->subpassIndex; + state->subpass_sample_locs[i].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->subpass_sample_locs[i].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->subpass_sample_locs[i].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + return VK_SUCCESS; } static VkResult @@ -2205,6 +2929,7 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].sample_location.count = 0; } return VK_SUCCESS; @@ -2290,20 +3015,6 @@ VkResult radv_ResetCommandBuffer( return radv_reset_cmd_buffer(cmd_buffer); } -static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer) -{ - struct radv_device *device = cmd_buffer->device; - if (device->gfx_init) { - uint64_t va = radv_buffer_get_va(device->gfx_init); - radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0)); - radeon_emit(cmd_buffer->cs, va); - radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff); - } else - si_init_config(cmd_buffer); -} - VkResult radv_BeginCommandBuffer( VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) @@ -2329,21 +3040,6 @@ VkResult radv_BeginCommandBuffer( cmd_buffer->state.predication_type = -1; cmd_buffer->usage_flags = pBeginInfo->flags; - /* setup initial configuration into command buffer */ - if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) { - switch (cmd_buffer->queue_family_index) { - case RADV_QUEUE_GENERAL: - emit_gfx_buffer_state(cmd_buffer); - break; - case RADV_QUEUE_COMPUTE: - si_init_compute(cmd_buffer); - break; - case RADV_QUEUE_TRANSFER: - default: - break; - } - } - if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY && (pBeginInfo->flags & 
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { assert(pBeginInfo->pInheritanceInfo); @@ -2357,7 +3053,7 @@ VkResult radv_BeginCommandBuffer( if (result != VK_SUCCESS) return result; - radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false); + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); } if (unlikely(cmd_buffer->device->trace_bo)) { @@ -2593,6 +3289,14 @@ void radv_CmdPushDescriptorSetKHR( pipelineBindPoint)) return; + /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR() + * because it is invalid, according to the Vulkan spec. + */ + for (int i = 0; i < descriptorWriteCount; i++) { + MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; + assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); + } + radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer, radv_descriptor_set_to_handle(push_set), descriptorWriteCount, pDescriptorWrites, 0, NULL); @@ -2603,7 +3307,7 @@ void radv_CmdPushDescriptorSetKHR( void radv_CmdPushDescriptorSetWithTemplateKHR( VkCommandBuffer commandBuffer, - VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout _layout, uint32_t set, const void* pData) @@ -2647,8 +3351,14 @@ VkResult radv_EndCommandBuffer( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { - if (cmd_buffer->device->physical_device->rad_info.chip_class == SI) + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + + /* Make sure to sync all pending active queries at the end of + * the command buffer.
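+ * Otherwise results written by queries that are still being flushed
+ * could stay in GPU caches and never become visible to a later
+ * vkGetQueryPoolResults() or vkCmdCopyQueryPoolResults() call.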
+ */ + cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; + si_emit_cache_flush(cmd_buffer); } @@ -2658,6 +3368,7 @@ VkResult radv_EndCommandBuffer( si_cp_dma_wait_for_idle(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs)) return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); @@ -2675,6 +3386,8 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer) if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline) return; + assert(!pipeline->ctx_cs.cdw); + cmd_buffer->state.emitted_compute_pipeline = pipeline; radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw); @@ -2737,6 +3450,7 @@ void radv_CmdBindPipeline( cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS; radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state); + radv_bind_streamout_state(cmd_buffer, pipeline); if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed) cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size; @@ -2765,6 +3479,11 @@ void radv_CmdSetViewport( assert(firstViewport < MAX_VIEWPORTS); assert(total_count >= 1 && total_count <= MAX_VIEWPORTS); + if (!memcmp(state->dynamic.viewport.viewports + firstViewport, + pViewports, viewportCount * sizeof(*pViewports))) { + return; + } + memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports, viewportCount * sizeof(*pViewports)); @@ -2784,6 +3503,11 @@ void radv_CmdSetScissor( assert(firstScissor < MAX_SCISSORS); assert(total_count >= 1 && total_count <= MAX_SCISSORS); + if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors, + scissorCount * sizeof(*pScissors))) { + return; + } + memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors, scissorCount * sizeof(*pScissors)); @@ -2795,6 +3519,10 @@ void radv_CmdSetLineWidth( float lineWidth) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + if (cmd_buffer->state.dynamic.line_width == lineWidth) + return; + cmd_buffer->state.dynamic.line_width = lineWidth; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH; } @@ -2806,12 +3534,19 @@ void radv_CmdSetDepthBias( float depthBiasSlopeFactor) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_bias.bias == depthBiasConstantFactor && + state->dynamic.depth_bias.clamp == depthBiasClamp && + state->dynamic.depth_bias.slope == depthBiasSlopeFactor) { + return; + } - cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor; - cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp; - cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor; + state->dynamic.depth_bias.bias = depthBiasConstantFactor; + state->dynamic.depth_bias.clamp = depthBiasClamp; + state->dynamic.depth_bias.slope = depthBiasSlopeFactor; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; } void radv_CmdSetBlendConstants( @@ -2819,11 +3554,14 @@ void radv_CmdSetBlendConstants( const float blendConstants[4]) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4)) + return; - memcpy(cmd_buffer->state.dynamic.blend_constants, - blendConstants, 
sizeof(float) * 4); + memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4); - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS; } void radv_CmdSetDepthBounds( @@ -2832,11 +3570,17 @@ void radv_CmdSetDepthBounds( float maxDepthBounds) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_bounds.min == minDepthBounds && + state->dynamic.depth_bounds.max == maxDepthBounds) { + return; + } - cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds; - cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds; + state->dynamic.depth_bounds.min = minDepthBounds; + state->dynamic.depth_bounds.max = maxDepthBounds; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS; } void radv_CmdSetStencilCompareMask( @@ -2845,13 +3589,21 @@ void radv_CmdSetStencilCompareMask( uint32_t compareMask) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_compare_mask.front == compareMask; + bool back_same = state->dynamic.stencil_compare_mask.back == compareMask; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) - cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask; + state->dynamic.stencil_compare_mask.front = compareMask; if (faceMask & VK_STENCIL_FACE_BACK_BIT) - cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask; + state->dynamic.stencil_compare_mask.back = compareMask; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK; } void radv_CmdSetStencilWriteMask( @@ -2860,13 +3612,21 @@ void radv_CmdSetStencilWriteMask( uint32_t writeMask) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_write_mask.front == writeMask; + bool back_same = state->dynamic.stencil_write_mask.back == writeMask; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) - cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask; + state->dynamic.stencil_write_mask.front = writeMask; if (faceMask & VK_STENCIL_FACE_BACK_BIT) - cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask; + state->dynamic.stencil_write_mask.back = writeMask; - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK; + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK; } void radv_CmdSetStencilReference( @@ -2875,6 +3635,14 @@ void radv_CmdSetStencilReference( uint32_t reference) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = state->dynamic.stencil_reference.front == reference; + bool back_same = state->dynamic.stencil_reference.back == reference; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) { + return; + } if (faceMask & VK_STENCIL_FACE_FRONT_BIT) cmd_buffer->state.dynamic.stencil_reference.front = reference; @@ -2897,12 +3665,36 @@ 
void radv_CmdSetDiscardRectangleEXT( assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES); assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES); + if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle, + pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) { + return; + } + typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle], pDiscardRectangles, discardRectangleCount); state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE; } +void radv_CmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS); + + state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel; + state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize; + state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount; + typed_memcpy(&state->dynamic.sample_location.locations[0], + pSampleLocationsInfo->pSampleLocations, + pSampleLocationsInfo->sampleLocationsCount); + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS; +} + void radv_CmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, @@ -2932,6 +3724,15 @@ void radv_CmdExecuteCommands( if (secondary->sample_positions_needed) primary->sample_positions_needed = true; + if (!secondary->state.framebuffer && + (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) { + /* Emit the framebuffer state from the primary if the secondary + * has been recorded without a framebuffer; otherwise + * fast color/depth clears can't work.
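+ * The secondary cannot emit those registers itself because it was
+ * recorded without knowing the framebuffer.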
+ */ + radv_emit_framebuffer_state(primary); + } + primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs); @@ -3062,7 +3863,7 @@ VkResult radv_ResetCommandPool( void radv_TrimCommandPool( VkDevice device, VkCommandPool commandPool, - VkCommandPoolTrimFlagsKHR flags) + VkCommandPoolTrimFlags flags) { RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool); @@ -3075,6 +3876,58 @@ void radv_TrimCommandPool( } } +static void +radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, + uint32_t subpass_id) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; + + MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, + cmd_buffer->cs, 4096); + + radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + radv_handle_subpass_image_transition(cmd_buffer, + subpass->attachments[i], + true); + } + + radv_cmd_buffer_clear_subpass(cmd_buffer); + + assert(cmd_buffer->cs->cdw <= cdw_max); +} + +static void +radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + const struct radv_subpass *subpass = state->subpass; + uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); + + radv_cmd_buffer_resolve_subpass(cmd_buffer); + + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { + const uint32_t a = subpass->attachments[i].attachment; + if (a == VK_ATTACHMENT_UNUSED) + continue; + + if (state->pass->attachments[a].last_subpass_idx != subpass_id) + continue; + + VkImageLayout layout = state->pass->attachments[a].final_layout; + struct radv_subpass_attachment att = { a, layout }; + radv_handle_subpass_image_transition(cmd_buffer, att, false); + } +} + void radv_CmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, @@ -3083,10 +3936,7 @@ void radv_CmdBeginRenderPass( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass); RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); - - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, 2048); - MAYBE_UNUSED VkResult result; + VkResult result; cmd_buffer->state.framebuffer = framebuffer; cmd_buffer->state.pass = pass; @@ -3096,10 +3946,11 @@ void radv_CmdBeginRenderPass( if (result != VK_SUCCESS) return; - radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true); - assert(cmd_buffer->cs->cdw <= cdw_max); + result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin); + if (result != VK_SUCCESS) + return; - radv_cmd_buffer_clear_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } void radv_CmdBeginRenderPass2KHR( @@ -3117,13 +3968,9 @@ void radv_CmdNextSubpass( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, - 2048); - - radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true); - radv_cmd_buffer_clear_subpass(cmd_buffer); + uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer); + radv_cmd_buffer_end_subpass(cmd_buffer); + radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1); } void 
radv_CmdNextSubpass2KHR( @@ -3159,12 +4006,13 @@ static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned in static void radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer, - uint32_t vertex_count) + uint32_t vertex_count, + bool use_opaque) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating)); radeon_emit(cmd_buffer->cs, vertex_count); radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX | - S_0287F0_USE_OPAQUE(0)); + S_0287F0_USE_OPAQUE(use_opaque)); } static void @@ -3225,51 +4073,6 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer, } } -struct radv_draw_info { - /** - * Number of vertices. - */ - uint32_t count; - - /** - * Index of the first vertex. - */ - int32_t vertex_offset; - - /** - * First instance id. - */ - uint32_t first_instance; - - /** - * Number of instances. - */ - uint32_t instance_count; - - /** - * First index (indexed draws only). - */ - uint32_t first_index; - - /** - * Whether it's an indexed draw. - */ - bool indexed; - - /** - * Indirect draw parameters resource. - */ - struct radv_buffer *indirect; - uint64_t indirect_offset; - uint32_t stride; - - /** - * Draw count parameters resource. - */ - struct radv_buffer *count_buffer; - uint64_t count_buffer_offset; -}; - static void radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, const struct radv_draw_info *info) @@ -3362,14 +4165,17 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, } } else { if (!state->subpass->view_mask) { - radv_cs_emit_draw_packet(cmd_buffer, info->count); + radv_cs_emit_draw_packet(cmd_buffer, + info->count, + !!info->strmout_buffer); } else { unsigned i; for_each_bit(i, state->subpass->view_mask) { radv_emit_view_index(cmd_buffer, i); radv_cs_emit_draw_packet(cmd_buffer, - info->count); + info->count, + !!info->strmout_buffer); } } } @@ -3393,26 +4199,30 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer, * any context registers. */ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, - bool indexed_draw) + const struct radv_draw_info *info) { struct radv_cmd_state *state = &cmd_buffer->state; if (!cmd_buffer->device->physical_device->has_scissor_bug) return false; + if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer) + return true; + uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL; - /* Index & Vertex buffer don't change context regs, and pipeline is handled later. */ - used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE); + /* Index, vertex and streamout buffers don't change context regs, and + * pipeline is already handled. + */ + used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | + RADV_CMD_DIRTY_VERTEX_BUFFER | + RADV_CMD_DIRTY_STREAMOUT_BUFFER | + RADV_CMD_DIRTY_PIPELINE); - /* Assume all state changes except these two can imply context rolls. */ if (cmd_buffer->state.dirty & used_states) return true; - if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline) - return true; - - if (indexed_draw && state->pipeline->graphics.prim_restart_enable && + if (info->indexed && state->pipeline->graphics.prim_restart_enable && (state->index_type ? 
0xffffffffu : 0xffffu) != state->last_primitive_reset_index) return true; @@ -3423,7 +4233,7 @@ static void radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, const struct radv_draw_info *info) { - bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed); + bool late_scissor_emission; if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) || cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline) @@ -3432,6 +4242,12 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) radv_emit_graphics_pipeline(cmd_buffer); + /* This should be before the cmd_buffer->state.dirty is cleared + * (excluding RADV_CMD_DIRTY_PIPELINE) and after + * cmd_buffer->state.context_roll_without_scissor_emitted is set. */ + late_scissor_emission = + radv_need_late_scissor_emission(cmd_buffer, info); + if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) radv_emit_framebuffer_state(cmd_buffer); @@ -3439,11 +4255,11 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER) radv_emit_index_buffer(cmd_buffer); } else { - /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE, + /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE, * so the state must be re-emitted before the next indexed * draw. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { cmd_buffer->state.last_index_type = -1; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; } @@ -3451,9 +4267,7 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, radv_cmd_buffer_flush_dynamic_state(cmd_buffer); - radv_emit_draw_registers(cmd_buffer, info->indexed, - info->instance_count > 1, info->indirect, - info->indirect ? 0 : info->count); + radv_emit_draw_registers(cmd_buffer, info); if (late_scissor_emission) radv_emit_scissor(cmd_buffer); @@ -3463,8 +4277,10 @@ static void radv_draw(struct radv_cmd_buffer *cmd_buffer, const struct radv_draw_info *info) { + struct radeon_info *rad_info = + &cmd_buffer->device->physical_device->rad_info; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; @@ -3473,6 +4289,19 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4096); + if (likely(!info->indirect)) { + /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is + * no workaround for indirect draws, but we can at least skip + * direct draws. + */ + if (unlikely(!info->instance_count)) + return; + + /* Handle count == 0. */ + if (unlikely(!info->count && !info->strmout_buffer)) + return; + } + /* Use optimal packet order based on whether we need to sync the * pipeline. */ @@ -3532,6 +4361,16 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, } } + /* Workaround for a VGT hang when streamout is enabled. + * It must be done after drawing. 
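+ * Note that the flag only requests the sync; the matching
+ * VGT_STREAMOUT_SYNC event is emitted later by the cache-flush path
+ * that consumes cmd_buffer->state.flush_bits.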
+ */ + if (cmd_buffer->state.streamout.streamout_enabled && + (rad_info->family == CHIP_HAWAII || + rad_info->family == CHIP_TONGA || + rad_info->family == CHIP_FIJI)) { + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC; + } + assert(cmd_buffer->cs->cdw <= cdw_max); radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH); } @@ -3614,55 +4453,6 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - -void radv_CmdDrawIndexedIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.indexed = true; - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - void radv_CmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer _buffer, @@ -3762,7 +4552,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, if (loc->sgpr_idx != -1) { for (unsigned i = 0; i < 3; ++i) { radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); - radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | COPY_DATA_DST_SEL(COPY_DATA_REG)); radeon_emit(cs, (va + 4 * i)); radeon_emit(cs, (va + 4 * i) >> 32); @@ -3832,7 +4622,6 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, } if (loc->sgpr_idx != -1) { - assert(!loc->indirect); assert(loc->num_sgprs == 3); radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + @@ -3879,7 +4668,7 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer, { struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = pipeline && pipeline != cmd_buffer->state.emitted_compute_pipeline; @@ -4000,20 +4789,16 @@ void radv_CmdEndRenderPass( radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); - radv_cmd_buffer_resolve_subpass(cmd_buffer); - - for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) { - VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout; - radv_handle_subpass_image_transition(cmd_buffer, - (struct radv_subpass_attachment){i, layout}); - } + radv_cmd_buffer_end_subpass(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); 
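+
+	/* Clear every per-pass pointer below so stale render pass state is
+	 * never consulted after the pass has ended. */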
cmd_buffer->state.pass = NULL; cmd_buffer->state.subpass = NULL; cmd_buffer->state.attachments = NULL; cmd_buffer->state.framebuffer = NULL; + cmd_buffer->state.subpass_sample_locs = NULL; } void radv_CmdEndRenderPass2KHR( @@ -4037,19 +4822,14 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, { assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); - unsigned layer_count = radv_get_layerCount(image, range); - uint64_t size = image->surface.htile_slice_size * layer_count; VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; - uint64_t offset = image->offset + image->htile_offset + - image->surface.htile_slice_size * range->baseArrayLayer; struct radv_cmd_state *state = &cmd_buffer->state; VkClearDepthStencilValue value = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset, - size, clear_word); + state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4057,6 +4837,15 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects); + + if (radv_image_is_tc_compat_htile(image)) { + /* Initialize the TC-compat metadata value to 0 because by + * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only + * have to conditionally update its value when performing + * a fast depth clear. + */ + radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0); + } } static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer, @@ -4066,15 +4855,20 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffe unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range, - VkImageAspectFlags pending_clears) + struct radv_sample_locations_state *sample_locs) { if (!radv_image_has_htile(image)) return; - if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED && - radv_layout_has_htile(image, dst_layout, dst_queue_mask)) { - /* TODO: merge with the clear if applicable */ - radv_initialize_htile(cmd_buffer, image, range, 0); + if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { + uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; + + if (radv_layout_is_htile_compressed(image, dst_layout, + dst_queue_mask)) { + clear_value = 0; + } + + radv_initialize_htile(cmd_buffer, image, range, clear_value); + } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { uint32_t clear_value = vk_format_is_stencil(image->vk_format) ?
0xfffff30f : 0xfffc000f; @@ -4089,7 +4883,8 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffe cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range); + radv_decompress_depth_image_inplace(cmd_buffer, image, + &local_range, sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4109,15 +4904,84 @@ static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; } +void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, + struct radv_image *image) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + static const uint32_t fmask_clear_values[4] = { + 0x00000000, + 0x02020202, + 0xE4E4E4E4, + 0x76543210 + }; + uint32_t log2_samples = util_logbase2(image->info.samples); + uint32_t value = fmask_clear_values[log2_samples]; + + state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | + RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + + state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value); + + state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; +} + void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t level_count = radv_get_levelCount(image, range); + unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value); + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + /* Mipmap levels aren't implemented. */ + assert(level_count == 1); + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, + range, value); + } else { + /* Initialize the mipmap levels with DCC first. */ + for (unsigned l = 0; l < level_count; l++) { + uint32_t level = range->baseMipLevel + l; + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[level]; + + if (!surf_level->dcc_fast_clear_size) + break; + + state->flush_bits |= + radv_dcc_clear_level(cmd_buffer, image, + level, value); + } + + /* When DCC is enabled with mipmaps, some levels might not + * support fast clears and we have to initialize them as "fully + * expanded". + */ + if (image->planes[0].surface.num_dcc_levels > 1) { + /* Compute the size of all fast clearable DCC levels. */ + for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[i]; + + if (!surf_level->dcc_fast_clear_size) + break; + + size = surf_level->dcc_offset + surf_level->dcc_fast_clear_size; + } + + /* Initialize the mipmap levels without DCC.
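+ * At this point `size` spans all fast-clearable levels, so the fill
+ * below writes 0xffffffff ("fully expanded") over the remaining
+ * [size, dcc_size) bytes that belong to the levels which can't be
+ * fast cleared.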
*/ + if (size != image->planes[0].surface.dcc_size) { + state->flush_bits |= + radv_fill_buffer(cmd_buffer, image->bo, + image->offset + image->dcc_offset + size, + image->planes[0].surface.dcc_size - size, + 0xffffffff); + } + } + } state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -4131,7 +4995,8 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, VkImageLayout src_layout, VkImageLayout dst_layout, unsigned src_queue_mask, - unsigned dst_queue_mask) + unsigned dst_queue_mask, + const VkImageSubresourceRange *range) { if (radv_image_has_cmask(image)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ @@ -4144,7 +5009,11 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, radv_initialise_cmask(cmd_buffer, image, value); } - if (radv_image_has_dcc(image)) { + if (radv_image_has_fmask(image)) { + radv_initialize_fmask(cmd_buffer, image); + } + + if (radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ bool need_decompress_pass = false; @@ -4154,15 +5023,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, need_decompress_pass = true; } - radv_initialize_dcc(cmd_buffer, image, value); + radv_initialize_dcc(cmd_buffer, image, range, value); - radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image, - need_decompress_pass); + radv_update_fce_metadata(cmd_buffer, image, range, + need_decompress_pass); } - if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) { + if (radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t color_values[2] = {}; - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, range, + color_values); } } @@ -4180,13 +5051,14 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, src_layout, dst_layout, - src_queue_mask, dst_queue_mask); + src_queue_mask, dst_queue_mask, + range); return; } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { - radv_initialize_dcc(cmd_buffer, image, 0xffffffffu); + radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); @@ -4195,10 +5067,28 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { + bool fce_eliminate = false, fmask_expand = false; + if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { - radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + fce_eliminate = true; } + + if (radv_image_has_fmask(image)) { + if (src_layout != VK_IMAGE_LAYOUT_GENERAL && + dst_layout == VK_IMAGE_LAYOUT_GENERAL) { + /* A FMASK decompress is required before doing + * a MSAA decompress using FMASK. 
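+ * In the GENERAL layout the image may be read directly in shaders
+ * (e.g. per-sample fetches), which requires the FMASK to be in the
+ * expanded/identity state first.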
+ */ + fmask_expand = true; + } + } + + if (fce_eliminate || fmask_expand) + radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + + if (fmask_expand) + radv_expand_fmask_image_inplace(cmd_buffer, image, range); } } @@ -4209,7 +5099,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, - VkImageAspectFlags pending_clears) + struct radv_sample_locations_state *sample_locs) { if (image->exclusive && src_family != dst_family) { /* This is an acquire or a release operation and there will be @@ -4219,6 +5109,9 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, assert(src_family == cmd_buffer->queue_family_index || dst_family == cmd_buffer->queue_family_index); + if (src_family == VK_QUEUE_FAMILY_EXTERNAL) + return; + if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) return; @@ -4228,6 +5121,9 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, return; } + if (src_layout == dst_layout) + return; + unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index); @@ -4239,7 +5135,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, radv_handle_depth_image_transition(cmd_buffer, image, src_layout, dst_layout, src_queue_mask, dst_queue_mask, - range, pending_clears); + range, sample_locs); } else { radv_handle_color_image_transition(cmd_buffer, image, src_layout, dst_layout, @@ -4252,6 +5148,7 @@ struct radv_barrier_info { uint32_t eventCount; const VkEvent *pEvents; VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; }; static void @@ -4276,7 +5173,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); - si_emit_wait_fence(cs, va, 1, 0xffffffff); + radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff); assert(cmd_buffer->cs->cdw <= cdw_max); } @@ -4303,24 +5200,54 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, image); } - radv_stage_flush(cmd_buffer, info->srcStageMask); + /* The Vulkan spec 1.1.98 says: + * + * "An execution dependency with only + * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask + * will only prevent that stage from executing in subsequently + * submitted commands. As this stage does not perform any actual + * execution, this is not observable - in effect, it does not delay + * processing of subsequent commands. Similarly an execution dependency + * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask + * will effectively not wait for any prior commands to complete." 
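+ *
+ * Hence the stage flush below is skipped when the destination stage
+ * mask is exactly VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, because such
+ * a dependency is never observable.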
+ */ + if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) + radv_stage_flush(cmd_buffer, info->srcStageMask); cmd_buffer->state.flush_bits |= src_flush_bits; for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); + + const struct VkSampleLocationsInfoEXT *sample_locs_info = + vk_find_struct_const(pImageMemoryBarriers[i].pNext, + SAMPLE_LOCATIONS_INFO_EXT); + struct radv_sample_locations_state sample_locations = {}; + + if (sample_locs_info) { + assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT); + sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel; + sample_locations.grid_size = sample_locs_info->sampleLocationGridSize; + sample_locations.count = sample_locs_info->sampleLocationsCount; + typed_memcpy(&sample_locations.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, pImageMemoryBarriers[i].newLayout, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, &pImageMemoryBarriers[i].subresourceRange, - 0); + sample_locs_info ? &sample_locations : NULL); } /* Make sure CP DMA is idle because the driver might have performed a * DMA operation for copying or filling buffers/images. */ - si_cp_dma_wait_for_idle(cmd_buffer); + if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT | + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)) + si_cp_dma_wait_for_idle(cmd_buffer); cmd_buffer->state.flush_bits |= dst_flush_bits; } @@ -4343,6 +5270,7 @@ void radv_CmdPipelineBarrier( info.eventCount = 0; info.pEvents = NULL; info.srcStageMask = srcStageMask; + info.dstStageMask = destStageMask; radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, @@ -4358,9 +5286,11 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, struct radeon_cmdbuf *cs = cmd_buffer->cs; uint64_t va = radv_buffer_get_va(event->bo); + si_emit_cache_flush(cmd_buffer); + radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18); + MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); /* Flags that only require a top-of-pipe event. */ VkPipelineStageFlags top_of_pipe_flags = @@ -4375,14 +5305,16 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, /* Make sure CP DMA is idle because the driver might have performed a * DMA operation for copying or filling buffers/images. */ - si_cp_dma_wait_for_idle(cmd_buffer); + if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT | + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)) + si_cp_dma_wait_for_idle(cmd_buffer); /* TODO: Emit EOS events for syncing PS/CS stages. */ if (!(stageMask & ~top_of_pipe_flags)) { /* Just need to sync the PFP engine. */ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); @@ -4391,7 +5323,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, } else if (!(stageMask & ~post_index_fetch_flags)) { /* Sync ME because PFP reads index and indirect buffers. 
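 * Writing the event value from the ME guarantees that the PFP, which
 * runs ahead of the ME, has already consumed those buffers by the
 * time the value lands in memory.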
*/ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME)); radeon_emit(cs, va); @@ -4391,7 +5323,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->device->physical_device->rad_info.chip_class, radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS, 0, - EOP_DATA_SEL_VALUE_32BIT, va, 2, value, + EOP_DATA_SEL_VALUE_32BIT, va, value, cmd_buffer->gfx9_eop_bug_va); } @@ -4462,29 +5394,80 @@ void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer, } /* VK_EXT_conditional_rendering */ -void vkCmdBeginConditionalRenderingEXT( +void radv_CmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer); - bool inverted; - uint64_t va; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + bool draw_visible = true; + uint64_t pred_value = 0; + uint64_t va, new_va; + unsigned pred_offset; va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset; - inverted = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT; + /* By default, if the 32-bit value at offset in buffer memory is zero, + * then the rendering commands are discarded, otherwise they are + * executed as normal. If the inverted flag is set, all commands are + * discarded if the value is non-zero. + */ + if (pConditionalRenderingBegin->flags & + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) { + draw_visible = false; + } + + si_emit_cache_flush(cmd_buffer); + + /* From the Vulkan spec 1.1.107: + * + * "If the 32-bit value at offset in buffer memory is zero, then the + * rendering commands are discarded, otherwise they are executed as + * normal. If the value of the predicate in buffer memory changes while + * conditional rendering is active, the rendering commands may be + * discarded in an implementation-dependent way. Some implementations + * may latch the value of the predicate upon beginning conditional + * rendering while others may read it before every rendering command." + * + * But the AMD hardware treats the predicate as a 64-bit value which + * means we need a workaround in the driver. Luckily, it's not required + * to support the case where the value changes while predication is + * active. + * + * The workaround is as follows: + * 1) allocate a 64-bit value in the upload BO and initialize it to 0 + * 2) copy the 32-bit predicate value to the low dword of the new + * allocation (the high dword stays zero) + * 3) use the newly allocated VA for predication + * + * Based on the conditionalrender demo, it's faster to do the COPY_DATA + * in ME (+ sync PFP) instead of PFP. + */ + radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset); + + new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset; + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, new_va); + radeon_emit(cs, new_va >> 32); + + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); + radeon_emit(cs, 0); /* Enable predication for this command buffer.
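 * The PFP_SYNC_ME above makes the PFP wait for the ME copy, so the
 * SET_PREDICATION packet fetched by the PFP is guaranteed to read the
 * just-written 64-bit predicate.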
*/ - si_emit_set_predication_state(cmd_buffer, inverted, va); + si_emit_set_predication_state(cmd_buffer, draw_visible, new_va); cmd_buffer->state.predicating = true; /* Store conditional rendering user info. */ - cmd_buffer->state.predication_type = inverted; - cmd_buffer->state.predication_va = va; + cmd_buffer->state.predication_type = draw_visible; + cmd_buffer->state.predication_va = new_va; } -void vkCmdEndConditionalRenderingEXT( +void radv_CmdEndConditionalRenderingEXT( VkCommandBuffer commandBuffer) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); @@ -4497,3 +5480,274 @@ void vkCmdEndConditionalRenderingEXT( cmd_buffer->state.predication_type = -1; cmd_buffer->state.predication_va = 0; } + +/* VK_EXT_transform_feedback */ +void radv_CmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; + uint8_t enabled_mask = 0; + + assert(firstBinding + bindingCount <= MAX_SO_BUFFERS); + for (uint32_t i = 0; i < bindingCount; i++) { + uint32_t idx = firstBinding + i; + + sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]); + sb[idx].offset = pOffsets[i]; + sb[idx].size = pSizes[i]; + + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, + sb[idx].buffer->bo); + + enabled_mask |= 1 << idx; + } + + cmd_buffer->state.streamout.enabled_mask |= enabled_mask; + + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER; +} + +static void +radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + + radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2); + radeon_emit(cs, + S_028B94_STREAMOUT_0_EN(so->streamout_enabled) | + S_028B94_RAST_STREAM(0) | + S_028B94_STREAMOUT_1_EN(so->streamout_enabled) | + S_028B94_STREAMOUT_2_EN(so->streamout_enabled) | + S_028B94_STREAMOUT_3_EN(so->streamout_enabled)); + radeon_emit(cs, so->hw_enabled_mask & + so->enabled_stream_buffers_mask); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + +static void +radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + bool old_streamout_enabled = so->streamout_enabled; + uint32_t old_hw_enabled_mask = so->hw_enabled_mask; + + so->streamout_enabled = enable; + + so->hw_enabled_mask = so->enabled_mask | + (so->enabled_mask << 4) | + (so->enabled_mask << 8) | + (so->enabled_mask << 12); + + if ((old_streamout_enabled != so->streamout_enabled) || + (old_hw_enabled_mask != so->hw_enabled_mask)) + radv_emit_streamout_enable(cmd_buffer); +} + +static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) +{ + struct radeon_cmdbuf *cs = cmd_buffer->cs; + unsigned reg_strmout_cntl; + + /* The register is at different places on different ASICs. 
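+ * Writing 0 first clears the offset-update status; the WAIT_REG_MEM
+ * below then polls the register until the streamout flush sets
+ * OFFSET_UPDATE_DONE.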
*/ + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { + reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL; + radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0); + } else { + reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL; + radeon_set_config_reg(cs, reg_strmout_cntl, 0); + } + + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0)); + + radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0)); + radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */ + radeon_emit(cs, reg_strmout_cntl >> 2); /* register */ + radeon_emit(cs, 0); + radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */ + radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */ + radeon_emit(cs, 4); /* poll interval */ +} + +void radv_CmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings; + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + radv_flush_vgt_streamout(cmd_buffer); + + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + /* AMD GCN binds streamout buffers as shader resources. + * VGT only counts primitives and tells the shader through + * SGPRs what to do. + */ + radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2); + radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */ + radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */ + + cmd_buffer->state.context_roll_without_scissor_emitted = true; + + if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) { + /* The array of counter buffers is optional. */ + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + uint64_t va = radv_buffer_get_va(buffer->bo); + + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + /* Append */ + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_DATA_TYPE(1) | /* offset in bytes */ + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, va); /* src address lo */ + radeon_emit(cs, va >> 32); /* src address hi */ + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } else { + /* Start from the beginning. 
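+ * STRMOUT_OFFSET_FROM_PACKET takes the buffer offset from this packet
+ * (zero) instead of reading it from memory.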
*/ + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_DATA_TYPE(1) | /* offset in bytes */ + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + } + } + + radv_set_streamout_enable(cmd_buffer, true); +} + +void radv_CmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + radv_flush_vgt_streamout(cmd_buffer); + + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) { + /* The array of counter buffers is optional. */ + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + uint64_t va = radv_buffer_get_va(buffer->bo); + + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_DATA_TYPE(1) | /* offset in bytes */ + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) | + STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */ + radeon_emit(cs, va); /* dst address lo */ + radeon_emit(cs, va >> 32); /* dst address hi */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } + + /* Deactivate transform feedback by zeroing the buffer size. + * The counters (primitives generated, primitives emitted) may + * be enabled even if there is no buffer bound. This ensures + * that the primitives-emitted query won't increment.
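+ * With a zero size the hardware effectively treats the buffer as
+ * full, so no further vertex data can be appended to it.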
+ */ + radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0); + + cmd_buffer->state.context_roll_without_scissor_emitted = true; + } + + radv_set_streamout_enable(cmd_buffer, false); +} + +void radv_CmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer _counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer); + struct radv_draw_info info = {}; + + info.instance_count = instanceCount; + info.first_instance = firstInstance; + info.strmout_buffer = counterBuffer; + info.strmout_buffer_offset = counterBufferOffset; + info.stride = vertexStride; + + radv_draw(cmd_buffer, &info); +} + +/* VK_AMD_buffer_marker */ +void radv_CmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset; + + si_emit_cache_flush(cmd_buffer); + + if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) { + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, marker); + radeon_emit(cs, 0); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + } else { + si_cs_emit_write_event_eop(cs, + cmd_buffer->device->physical_device->rad_info.chip_class, + radv_cmd_buffer_uses_mec(cmd_buffer), + V_028A90_BOTTOM_OF_PIPE_TS, 0, + EOP_DATA_SEL_VALUE_32BIT, + va, marker, + cmd_buffer->gfx9_eop_bug_va); + } +}
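For reference, here is a minimal application-side sketch of how the two paths in radv_CmdWriteBufferMarkerAMD above are reached. It is an illustration under assumptions only: VK_AMD_buffer_marker must be enabled on the device, marker_buf must be a valid buffer of at least 8 bytes, and the helper name is hypothetical.

	#include <vulkan/vulkan.h>

	/* Hypothetical helper, illustrative only. */
	static void
	write_debug_markers(VkDevice device, VkCommandBuffer cmd, VkBuffer marker_buf)
	{
		PFN_vkCmdWriteBufferMarkerAMD write_marker =
			(PFN_vkCmdWriteBufferMarkerAMD)
			vkGetDeviceProcAddr(device, "vkCmdWriteBufferMarkerAMD");

		/* TOP_OF_PIPE only: the driver emits an immediate CP COPY_DATA,
		 * so the marker is written as soon as the packet is processed. */
		write_marker(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
			     marker_buf, 0, 0xC0FFEE01u);

		/* Any other stage: the driver emits a bottom-of-pipe EOP event,
		 * so the marker is written only after prior work has drained. */
		write_marker(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
			     marker_buf, 4, 0xC0FFEE02u);
	}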