X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=f311b978b30a22220010beb5bc78a4e410252c57;hb=dc6e3053a788b1e2cb63254ffb63a480adac3be7;hp=989372e48b782ac532d4d949e8c1fb8d8eb79a05;hpb=bd1186572f6924a15ea10cd72a95c6d451016bae;p=mesa.git diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index 989372e48b7..f311b978b30 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -30,8 +30,8 @@ #include "radv_shader.h" #include "radv_cs.h" #include "sid.h" -#include "gfx9d.h" #include "vk_format.h" +#include "vk_util.h" #include "radv_debug.h" #include "radv_meta.h" @@ -57,7 +57,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, VkImageLayout dst_layout, uint32_t src_family, uint32_t dst_family, - const VkImageSubresourceRange *range); + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs); const struct radv_dynamic_state default_dynamic_state = { .viewport = { @@ -105,6 +106,7 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, dest->viewport.count = src->viewport.count; dest->scissor.count = src->scissor.count; dest->discard_rectangle.count = src->discard_rectangle.count; + dest->sample_location.count = src->sample_location.count; if (copy_mask & RADV_DYNAMIC_VIEWPORT) { if (memcmp(&dest->viewport.viewports, &src->viewport.viewports, @@ -192,6 +194,22 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, } } + if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) { + if (dest->sample_location.per_pixel != src->sample_location.per_pixel || + dest->sample_location.grid_size.width != src->sample_location.grid_size.width || + dest->sample_location.grid_size.height != src->sample_location.grid_size.height || + memcmp(&dest->sample_location.locations, + &src->sample_location.locations, + src->sample_location.count * sizeof(VkSampleLocationEXT))) { + dest->sample_location.per_pixel = src->sample_location.per_pixel; + dest->sample_location.grid_size = src->sample_location.grid_size; + typed_memcpy(dest->sample_location.locations, + src->sample_location.locations, + src->sample_location.count); + dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS; + } + } + cmd_buffer->state.dirty |= dest_mask; } @@ -215,7 +233,7 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer) { return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE && - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; } enum ring_type radv_queue_family_to_ring(int f) { @@ -301,7 +319,6 @@ radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) static VkResult radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) { - cmd_buffer->device->ws->cs_reset(cmd_buffer->cs); list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, @@ -326,6 +343,8 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->record_result = VK_SUCCESS; + memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings)); + for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) { cmd_buffer->descriptors[i].dirty = 0; cmd_buffer->descriptors[i].valid = 0; @@ -338,14 +357,15 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) unsigned fence_offset, eop_bug_offset; void *fence_ptr; - radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, &fence_offset, + radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 
8, &fence_offset, &fence_ptr); + cmd_buffer->gfx9_fence_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); cmd_buffer->gfx9_fence_va += fence_offset; /* Allocate a buffer for the EOP bug on GFX9. */ - radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0, + radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8, &eop_bug_offset, &fence_ptr); cmd_buffer->gfx9_eop_bug_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -416,6 +436,8 @@ radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer, unsigned *out_offset, void **ptr) { + assert(util_is_power_of_two_nonzero(alignment)); + uint64_t offset = align(cmd_buffer->upload.offset, alignment); if (offset + size > cmd_buffer->upload.size) { if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size)) @@ -562,8 +584,8 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer, for_each_bit(i, descriptors_state->valid) { struct radv_descriptor_set *set = descriptors_state->sets[i]; - data[i * 2] = (uintptr_t)set; - data[i * 2 + 1] = (uintptr_t)set >> 32; + data[i * 2] = (uint64_t)(uintptr_t)set; + data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32; } radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data); @@ -628,6 +650,190 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer, } } +/** + * Convert the user sample locations to hardware sample locations (the values + * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*). + */ +static void +radv_convert_user_sample_locs(struct radv_sample_locations_state *state, + uint32_t x, uint32_t y, VkOffset2D *sample_locs) +{ + uint32_t x_offset = x % state->grid_size.width; + uint32_t y_offset = y % state->grid_size.height; + uint32_t num_samples = (uint32_t)state->per_pixel; + VkSampleLocationEXT *user_locs; + uint32_t pixel_offset; + + pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples; + + assert(pixel_offset <= MAX_SAMPLE_LOCATIONS); + user_locs = &state->locations[pixel_offset]; + + for (uint32_t i = 0; i < num_samples; i++) { + float shifted_pos_x = user_locs[i].x - 0.5; + float shifted_pos_y = user_locs[i].y - 0.5; + + int32_t scaled_pos_x = floor(shifted_pos_x * 16); + int32_t scaled_pos_y = floor(shifted_pos_y * 16); + + sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7); + sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7); + } +} + +/** + * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample + * locations. + */ +static void +radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs, + uint32_t *sample_locs_pixel) +{ + for (uint32_t i = 0; i < num_samples; i++) { + uint32_t sample_reg_idx = i / 4; + uint32_t sample_loc_idx = i % 4; + int32_t pos_x = sample_locs[i].x; + int32_t pos_y = sample_locs[i].y; + + uint32_t shift_x = 8 * sample_loc_idx; + uint32_t shift_y = shift_x + 4; + + sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x; + sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y; + } +} + +/** + * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware + * sample locations. + */ +static uint64_t +radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer, + VkOffset2D *sample_locs, + uint32_t num_samples) +{ + uint32_t centroid_priorities[num_samples]; + uint32_t sample_mask = num_samples - 1; + uint32_t distances[num_samples]; + uint64_t centroid_priority = 0; + + /* Compute the distances from center for each sample. 
*/ + for (int i = 0; i < num_samples; i++) { + distances[i] = (sample_locs[i].x * sample_locs[i].x) + + (sample_locs[i].y * sample_locs[i].y); + } + + /* Compute the centroid priorities by looking at the distances array. */ + for (int i = 0; i < num_samples; i++) { + uint32_t min_idx = 0; + + for (int j = 1; j < num_samples; j++) { + if (distances[j] < distances[min_idx]) + min_idx = j; + } + + centroid_priorities[i] = min_idx; + distances[min_idx] = 0xffffffff; + } + + /* Compute the final centroid priority. */ + for (int i = 0; i < 8; i++) { + centroid_priority |= + centroid_priorities[i & sample_mask] << (i * 4); + } + + return centroid_priority << 32 | centroid_priority; +} + +/** + * Emit the sample locations that are specified with VK_EXT_sample_locations. + */ +static void +radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; + struct radv_multisample_state *ms = &pipeline->graphics.ms; + struct radv_sample_locations_state *sample_location = + &cmd_buffer->state.dynamic.sample_location; + uint32_t num_samples = (uint32_t)sample_location->per_pixel; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t sample_locs_pixel[4][2] = {}; + VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */ + uint32_t max_sample_dist = 0; + uint64_t centroid_priority; + + if (!cmd_buffer->state.dynamic.sample_location.count) + return; + + /* Convert the user sample locations to hardware sample locations. */ + radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]); + radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]); + radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]); + radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]); + + /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */ + for (uint32_t i = 0; i < 4; i++) { + radv_compute_sample_locs_pixel(num_samples, sample_locs[i], + sample_locs_pixel[i]); + } + + /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */ + centroid_priority = + radv_compute_centroid_priority(cmd_buffer, sample_locs[0], + num_samples); + + /* Compute the maximum sample distance from the specified locations. */ + for (uint32_t i = 0; i < num_samples; i++) { + VkOffset2D offset = sample_locs[0][i]; + max_sample_dist = MAX2(max_sample_dist, + MAX2(abs(offset.x), abs(offset.y))); + } + + /* Emit the specified user sample locations. 
*/ + switch (num_samples) { + case 2: + case 4: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + break; + case 8: + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]); + radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]); + radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]); + radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]); + radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]); + break; + default: + unreachable("invalid number of samples"); + } + + /* Emit the maximum sample distance and the centroid priority. */ + uint32_t pa_sc_aa_config = ms->pa_sc_aa_config; + + pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST; + pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist); + + radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1); + radeon_emit(cs, pa_sc_aa_config); + + radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2); + radeon_emit(cs, centroid_priority); + radeon_emit(cs, centroid_priority >> 32); + + /* GFX9: Flush DFSM when the AA mode changes. */ + if (cmd_buffer->device->dfsm_allowed) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); + } + + cmd_buffer->state.context_roll_without_scissor_emitted = true; +} + static void radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pipeline, @@ -665,7 +871,7 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); - radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples); + radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); /* GFX9: Flush DFSM when the AA mode changes. 
*/ if (cmd_buffer->device->dfsm_allowed) { @@ -1034,12 +1240,13 @@ static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, struct radv_attachment_info *att, - struct radv_image *image, + struct radv_image_view *iview, VkImageLayout layout) { - bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI; + bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; + struct radv_image *image = iview->image; if (!radv_layout_dcc_compressed(image, layout, radv_image_queue_family_mask(image, @@ -1048,6 +1255,15 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_DCC_ENABLE; } + if (radv_image_is_tc_compat_cmask(image) && + (radv_is_fmask_decompress_pipeline(cmd_buffer) || + radv_is_dcc_decompress_pipeline(cmd_buffer))) { + /* If this bit is set, the FMASK decompression operation + * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS). + */ + cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; + } + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1067,7 +1283,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32)); radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4, - S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch)); + cb->cb_mrt_epitch); } else { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1087,9 +1303,17 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, } } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, iview->base_mip)) { /* Drawing with DCC enabled also compresses colorbuffers. 
*/ - radv_update_dcc_metadata(cmd_buffer, image, true); + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + radv_update_dcc_metadata(cmd_buffer, image, &range, true); } } @@ -1272,7 +1496,7 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ++reg_count; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); @@ -1296,7 +1520,7 @@ radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, uint64_t va = radv_buffer_get_va(image->bo); va += image->offset + image->tc_compat_zrange_offset; - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); @@ -1373,7 +1597,7 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); @@ -1401,22 +1625,27 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->fce_pred_offset; + uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1424,22 +1653,27 @@ radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, bool value) + struct radv_image *image, + const VkImageSubresourceRange *range, bool value) { uint64_t pred_val = value; - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->dcc_pred_offset; + uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_dcc(image)); + assert(radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0)); radeon_emit(cmd_buffer->cs, 
S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cmd_buffer->cs, va); radeon_emit(cmd_buffer->cs, va >> 32); - radeon_emit(cmd_buffer->cs, pred_val); - radeon_emit(cmd_buffer->cs, pred_val >> 32); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cmd_buffer->cs, pred_val); + radeon_emit(cmd_buffer->cs, pred_val >> 32); + } } /** @@ -1481,23 +1715,28 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, static void radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t color_values[2]) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); + uint32_t count = 2 * level_count; - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, color_values[0]); - radeon_emit(cs, color_values[1]); + + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cs, color_values[0]); + radeon_emit(cs, color_values[1]); + } } /** @@ -1505,13 +1744,23 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, int cb_idx, uint32_t color_values[2]) { - assert(radv_image_has_cmask(image) || radv_image_has_dcc(image)); + struct radv_image *image = iview->image; + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + + assert(radv_image_has_cmask(image) || + radv_dcc_enabled(image, iview->base_mip)); - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values); radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx, color_values); @@ -1522,27 +1771,26 @@ radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + struct radv_image_view *iview, int cb_idx) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - - va += image->offset + image->clear_value_offset; + struct radv_image *image = iview->image; + uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip); - if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image)) + if (!radv_image_has_cmask(image) && + !radv_dcc_enabled(image, iview->base_mip)) return; uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { + if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 
2); radeon_emit(cs, 2); } else { - /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating)); radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | COPY_DATA_DST_SEL(COPY_DATA_REG) | @@ -1578,17 +1826,19 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int idx = subpass->color_attachments[i].attachment; struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; + struct radv_image_view *iview = att->attachment; + struct radv_image *image = iview->image; VkImageLayout layout = subpass->color_attachments[i].layout; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT); - radv_emit_fb_color_state(cmd_buffer, i, att, image, layout); + assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); + radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); - radv_load_color_clear_metadata(cmd_buffer, image, i); + radv_load_color_clear_metadata(cmd_buffer, iview, i); - if (image->surface.bpe >= 8) + if (image->planes[0].surface.bpe >= 8) num_bpp64_colorbufs++; } @@ -1625,8 +1875,8 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028208_BR_X(framebuffer->width) | S_028208_BR_Y(framebuffer->height)); - if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) { - uint8_t watermark = 4; /* Default value for VI. */ + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { + uint8_t watermark = 4; /* Default value for GFX8. */ /* For optimal DCC performance. */ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { @@ -1687,7 +1937,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) uint32_t db_count_control; if(!cmd_buffer->state.active_occlusion_queries) { - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) && pipeline->graphics.disable_out_of_order_rast_for_occlusion && has_perfect_queries) { @@ -1706,7 +1956,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) const struct radv_subpass *subpass = cmd_buffer->state.subpass; uint32_t sample_rate = subpass ? 
util_logbase2(subpass->max_sample_count) : 0; - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { db_count_control = S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | S_028004_SAMPLE_RATE(sample_rate) | @@ -1771,6 +2021,9 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE) radv_emit_discard_rectangle(cmd_buffer); + if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) + radv_emit_sample_locations(cmd_buffer); + cmd_buffer->state.dirty &= ~states; } @@ -1986,13 +2239,13 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, { if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && - cmd_buffer->state.pipeline->vertex_elements.count && + cmd_buffer->state.pipeline->num_vertex_bindings && radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; unsigned vb_offset; void *vb_ptr; uint32_t i = 0; - uint32_t count = velems->count; + uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings; uint64_t va; /* allocate some descriptor state for vertex buffers */ @@ -2003,21 +2256,28 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, for (i = 0; i < count; i++) { uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; uint32_t offset; - int vb = velems->binding[i]; - struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer; - uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; + struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; + uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + + if (!buffer) + continue; va = radv_buffer_get_va(buffer->bo); - offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i]; + offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; desc[0] = va; desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride) + if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; else desc[2] = buffer->size - offset; - desc[3] = velems->rsrc_word3[i]; + desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | + S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2095,7 +2355,7 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) /* Set the descriptor. * - * On VI, the format must be non-INVALID, otherwise + * On GFX8, the format must be non-INVALID, otherwise * the buffer will be considered not bound and store * instructions will be no-ops. */ @@ -2192,6 +2452,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1, draw_info->indirect, + !!draw_info->strmout_buffer, draw_info->indirect ? 
0 : draw_info->count); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { @@ -2199,7 +2460,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param); - } else if (info->chip_class >= CIK) { + } else if (info->chip_class >= GFX7) { radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param); @@ -2427,11 +2688,67 @@ void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, NULL); } +static uint32_t +radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t subpass_id = state->subpass - state->pass->subpasses; + + /* The id of this subpass shouldn't exceed the number of subpasses in + * this render pass minus 1. + */ + assert(subpass_id < state->pass->subpass_count); + return subpass_id; +} + +static struct radv_sample_locations_state * +radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer, + uint32_t att_idx, + bool begin_subpass) +{ + struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); + struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment; + + if (view->image->info.samples == 1) + return NULL; + + if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) { + /* Return the initial sample locations if this is the initial + * layout transition of the given subpass attachemnt. + */ + if (state->attachments[att_idx].sample_location.count > 0) + return &state->attachments[att_idx].sample_location; + } else { + /* Otherwise return the subpass sample locations if defined. */ + if (state->subpass_sample_locs) { + /* Because the driver sets the current subpass before + * initial layout transitions, we should use the sample + * locations from the previous subpass to avoid an + * off-by-one problem. Otherwise, use the sample + * locations for the current subpass for final layout + * transitions. + */ + if (begin_subpass) + subpass_id--; + + for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) { + if (state->subpass_sample_locs[i].subpass_idx == subpass_id) + return &state->subpass_sample_locs[i].sample_location; + } + } + } + + return NULL; +} + static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer, - struct radv_subpass_attachment att) + struct radv_subpass_attachment att, + bool begin_subpass) { unsigned idx = att.attachment; struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment; + struct radv_sample_locations_state *sample_locs; VkImageSubresourceRange range; range.aspectMask = 0; range.baseMipLevel = view->base_mip; @@ -2439,7 +2756,7 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf range.baseArrayLayer = view->base_layer; range.layerCount = cmd_buffer->state.framebuffer->layers; - if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) { + if (cmd_buffer->state.subpass->view_mask) { /* If the current subpass uses multiview, the driver might have * performed a fast color/depth clear to the whole image * (including all layers). To make sure the driver will @@ -2450,10 +2767,16 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask); } + /* Get the subpass sample locations for the given attachment, if NULL + * is returned the driver will use the default HW locations. 
+ */ + sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, + begin_subpass); + radv_handle_image_transition(cmd_buffer, view->image, cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range); + att.layout, 0, 0, &range, sample_locs); cmd_buffer->state.attachments[idx].current_layout = att.layout; @@ -2469,6 +2792,89 @@ radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; } +static VkResult +radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, + struct radv_render_pass *pass, + const VkRenderPassBeginInfo *info) +{ + const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs = + vk_find_struct_const(info->pNext, + RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); + struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_framebuffer *framebuffer = state->framebuffer; + + if (!sample_locs) { + state->subpass_sample_locs = NULL; + return VK_SUCCESS; + } + + for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) { + const VkAttachmentSampleLocationsEXT *att_sample_locs = + &sample_locs->pAttachmentInitialSampleLocations[i]; + uint32_t att_idx = att_sample_locs->attachmentIndex; + struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; + struct radv_image *image = att->attachment->image; + + assert(vk_format_is_depth_or_stencil(image->vk_format)); + + /* From the Vulkan spec 1.1.108: + * + * "If the image referenced by the framebuffer attachment at + * index attachmentIndex was not created with + * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT + * then the values specified in sampleLocationsInfo are + * ignored." + */ + if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) + continue; + + const VkSampleLocationsInfoEXT *sample_locs_info = + &att_sample_locs->sampleLocationsInfo; + + state->attachments[att_idx].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->attachments[att_idx].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->attachments[att_idx].sample_location.count = + sample_locs_info->sampleLocationsCount; + typed_memcpy(&state->attachments[att_idx].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc, + sample_locs->postSubpassSampleLocationsCount * + sizeof(state->subpass_sample_locs[0]), + 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (state->subpass_sample_locs == NULL) { + cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY; + return cmd_buffer->record_result; + } + + state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount; + + for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) { + const VkSubpassSampleLocationsEXT *subpass_sample_locs_info = + &sample_locs->pPostSubpassSampleLocations[i]; + const VkSampleLocationsInfoEXT *sample_locs_info = + &subpass_sample_locs_info->sampleLocationsInfo; + + state->subpass_sample_locs[i].subpass_idx = + subpass_sample_locs_info->subpassIndex; + state->subpass_sample_locs[i].sample_location.per_pixel = + sample_locs_info->sampleLocationsPerPixel; + state->subpass_sample_locs[i].sample_location.grid_size = + sample_locs_info->sampleLocationGridSize; + state->subpass_sample_locs[i].sample_location.count = + sample_locs_info->sampleLocationsCount; + 
typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + + return VK_SUCCESS; +} + static VkResult radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, struct radv_render_pass *pass, @@ -2523,6 +2929,7 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].sample_location.count = 0; } return VK_SUCCESS; @@ -2882,6 +3289,14 @@ void radv_CmdPushDescriptorSetKHR( pipelineBindPoint)) return; + /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR() + * because it is invalid, according to Vulkan spec. + */ + for (int i = 0; i < descriptorWriteCount; i++) { + MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i]; + assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); + } + radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer, radv_descriptor_set_to_handle(push_set), descriptorWriteCount, pDescriptorWrites, 0, NULL); @@ -2936,8 +3351,14 @@ VkResult radv_EndCommandBuffer( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) { - if (cmd_buffer->device->physical_device->rad_info.chip_class == SI) + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6) cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2; + + /* Make sure to sync all pending active queries at the end of + * command buffer. + */ + cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; + si_emit_cache_flush(cmd_buffer); } @@ -2947,6 +3368,7 @@ VkResult radv_EndCommandBuffer( si_cp_dma_wait_for_idle(cmd_buffer); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs)) return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); @@ -3254,6 +3676,25 @@ void radv_CmdSetDiscardRectangleEXT( state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE; } +void radv_CmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS); + + state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel; + state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize; + state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount; + typed_memcpy(&state->dynamic.sample_location.locations[0], + pSampleLocationsInfo->pSampleLocations, + pSampleLocationsInfo->sampleLocationsCount); + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS; +} + void radv_CmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, @@ -3426,19 +3867,6 @@ void radv_TrimCommandPool( } } -static uint32_t -radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer) -{ - struct radv_cmd_state *state = &cmd_buffer->state; - uint32_t subpass_id = state->subpass - state->pass->subpasses; - - /* The id of this subpass shouldn't exceed the number of subpasses in - * this render pass minus 1. 
- */ - assert(subpass_id < state->pass->subpass_count); - return subpass_id; -} - static void radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, uint32_t subpass_id) @@ -3447,20 +3875,22 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, struct radv_subpass *subpass = &state->pass->subpasses[subpass_id]; MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, - cmd_buffer->cs, 2048); + cmd_buffer->cs, 4096); radv_subpass_barrier(cmd_buffer, &subpass->start_barrier); + radv_cmd_buffer_set_subpass(cmd_buffer, subpass); + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { const uint32_t a = subpass->attachments[i].attachment; if (a == VK_ATTACHMENT_UNUSED) continue; radv_handle_subpass_image_transition(cmd_buffer, - subpass->attachments[i]); + subpass->attachments[i], + true); } - radv_cmd_buffer_set_subpass(cmd_buffer, subpass); radv_cmd_buffer_clear_subpass(cmd_buffer); assert(cmd_buffer->cs->cdw <= cdw_max); @@ -3484,8 +3914,8 @@ radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) continue; VkImageLayout layout = state->pass->attachments[a].final_layout; - radv_handle_subpass_image_transition(cmd_buffer, - (struct radv_subpass_attachment){a, layout}); + struct radv_subpass_attachment att = { a, layout }; + radv_handle_subpass_image_transition(cmd_buffer, att, false); } } @@ -3507,6 +3937,10 @@ void radv_CmdBeginRenderPass( if (result != VK_SUCCESS) return; + result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin); + if (result != VK_SUCCESS) + return; + radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } @@ -3812,11 +4246,11 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER) radv_emit_index_buffer(cmd_buffer); } else { - /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE, + /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE, * so the state must be re-emitted before the next indexed * draw. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { cmd_buffer->state.last_index_type = -1; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; } @@ -3837,7 +4271,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, struct radeon_info *rad_info = &cmd_buffer->device->physical_device->rad_info; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) && cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline; @@ -3847,7 +4281,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, cmd_buffer->cs, 4096); if (likely(!info->indirect)) { - /* SI-CI treat instance_count==0 as instance_count==1. There is + /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is * no workaround for indirect draws, but we can at least skip * direct draws. 
*/ @@ -4010,55 +4444,6 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - -void radv_CmdDrawIndexedIndirectCountAMD( - VkCommandBuffer commandBuffer, - VkBuffer _buffer, - VkDeviceSize offset, - VkBuffer _countBuffer, - VkDeviceSize countBufferOffset, - uint32_t maxDrawCount, - uint32_t stride) -{ - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); - RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer); - struct radv_draw_info info = {}; - - info.indexed = true; - info.count = maxDrawCount; - info.indirect = buffer; - info.indirect_offset = offset; - info.count_buffer = count_buffer; - info.count_buffer_offset = countBufferOffset; - info.stride = stride; - - radv_draw(cmd_buffer, &info); -} - void radv_CmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer _buffer, @@ -4274,7 +4659,7 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer, { struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; bool has_prefetch = - cmd_buffer->device->physical_device->rad_info.chip_class >= CIK; + cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7; bool pipeline_is_dirty = pipeline && pipeline != cmd_buffer->state.emitted_compute_pipeline; @@ -4393,16 +4778,18 @@ void radv_CmdEndRenderPass( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_cmd_buffer_end_subpass(cmd_buffer); - radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); + radv_cmd_buffer_end_subpass(cmd_buffer); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); cmd_buffer->state.pass = NULL; cmd_buffer->state.subpass = NULL; cmd_buffer->state.attachments = NULL; cmd_buffer->state.framebuffer = NULL; + cmd_buffer->state.subpass_sample_locs = NULL; } void radv_CmdEndRenderPass2KHR( @@ -4426,19 +4813,14 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, { assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); - unsigned layer_count = radv_get_layerCount(image, range); - uint64_t size = image->surface.htile_slice_size * layer_count; VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; - uint64_t offset = image->offset + image->htile_offset + - image->surface.htile_slice_size * range->baseArrayLayer; struct radv_cmd_state *state = &cmd_buffer->state; VkClearDepthStencilValue value = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset, - size, clear_word); + state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4463,15 +4845,21 @@ static void 
radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe VkImageLayout dst_layout, unsigned src_queue_mask, unsigned dst_queue_mask, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (!radv_image_has_htile(image)) return; - if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED && - radv_layout_has_htile(image, dst_layout, dst_queue_mask)) { - /* TODO: merge with the clear if applicable */ - radv_initialize_htile(cmd_buffer, image, range, 0); + if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { + uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; + + if (radv_layout_is_htile_compressed(image, dst_layout, + dst_queue_mask)) { + clear_value = 0; + } + + radv_initialize_htile(cmd_buffer, image, range, clear_value); } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; @@ -4486,7 +4874,8 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range); + radv_decompress_depth_image_inplace(cmd_buffer, image, + &local_range, sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -4528,14 +4917,62 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, } void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, uint32_t value) + struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t level_count = radv_get_levelCount(image, range); + unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; - state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value); + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + /* Mipmap level aren't implemented. */ + assert(level_count == 1); + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, + range, value); + } else { + /* Initialize the mipmap levels with DCC first. */ + for (unsigned l = 0; l < level_count; l++) { + uint32_t level = range->baseMipLevel + l; + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[level]; + + if (!surf_level->dcc_fast_clear_size) + break; + + state->flush_bits |= + radv_dcc_clear_level(cmd_buffer, image, + level, value); + } + + /* When DCC is enabled with mipmaps, some levels might not + * support fast clears and we have to initialize them as "fully + * expanded". + */ + if (image->planes[0].surface.num_dcc_levels > 1) { + /* Compute the size of all fast clearable DCC levels. */ + for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) { + struct legacy_surf_level *surf_level = + &image->planes[0].surface.u.legacy.level[i]; + + if (!surf_level->dcc_fast_clear_size) + break; + + size = surf_level->dcc_offset + surf_level->dcc_fast_clear_size; + } + + /* Initialize the mipmap levels without DCC. 
*/ + if (size != image->planes[0].surface.dcc_size) { + state->flush_bits |= + radv_fill_buffer(cmd_buffer, image->bo, + image->offset + image->dcc_offset + size, + image->planes[0].surface.dcc_size - size, + 0xffffffff); + } + } + } state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -4549,7 +4986,8 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, VkImageLayout src_layout, VkImageLayout dst_layout, unsigned src_queue_mask, - unsigned dst_queue_mask) + unsigned dst_queue_mask, + const VkImageSubresourceRange *range) { if (radv_image_has_cmask(image)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ @@ -4566,7 +5004,7 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, radv_initialize_fmask(cmd_buffer, image); } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t value = 0xffffffffu; /* Fully expanded mode. */ bool need_decompress_pass = false; @@ -4576,15 +5014,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, need_decompress_pass = true; } - radv_initialize_dcc(cmd_buffer, image, value); + radv_initialize_dcc(cmd_buffer, image, range, value); - radv_update_fce_metadata(cmd_buffer, image, + radv_update_fce_metadata(cmd_buffer, image, range, need_decompress_pass); } - if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) { + if (radv_image_has_cmask(image) || + radv_dcc_enabled(image, range->baseMipLevel)) { uint32_t color_values[2] = {}; - radv_set_color_clear_metadata(cmd_buffer, image, color_values); + radv_set_color_clear_metadata(cmd_buffer, image, range, + color_values); } } @@ -4602,13 +5042,14 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, src_layout, dst_layout, - src_queue_mask, dst_queue_mask); + src_queue_mask, dst_queue_mask, + range); return; } - if (radv_image_has_dcc(image)) { + if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { - radv_initialize_dcc(cmd_buffer, image, 0xffffffffu); + radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); @@ -4617,17 +5058,28 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { + bool fce_eliminate = false, fmask_expand = false; + if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { - radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + fce_eliminate = true; } if (radv_image_has_fmask(image)) { if (src_layout != VK_IMAGE_LAYOUT_GENERAL && dst_layout == VK_IMAGE_LAYOUT_GENERAL) { - radv_expand_fmask_image_inplace(cmd_buffer, image, range); + /* A FMASK decompress is required before doing + * a MSAA decompress using FMASK. 
+ */ + fmask_expand = true; } } + + if (fce_eliminate || fmask_expand) + radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); + + if (fmask_expand) + radv_expand_fmask_image_inplace(cmd_buffer, image, range); } } @@ -4637,7 +5089,8 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, VkImageLayout dst_layout, uint32_t src_family, uint32_t dst_family, - const VkImageSubresourceRange *range) + const VkImageSubresourceRange *range, + struct radv_sample_locations_state *sample_locs) { if (image->exclusive && src_family != dst_family) { /* This is an acquire or a release operation and there will be @@ -4647,6 +5100,9 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, assert(src_family == cmd_buffer->queue_family_index || dst_family == cmd_buffer->queue_family_index); + if (src_family == VK_QUEUE_FAMILY_EXTERNAL) + return; + if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) return; @@ -4670,7 +5126,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, radv_handle_depth_image_transition(cmd_buffer, image, src_layout, dst_layout, src_queue_mask, dst_queue_mask, - range); + range, sample_locs); } else { radv_handle_color_image_transition(cmd_buffer, image, src_layout, dst_layout, @@ -4752,12 +5208,29 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); + + const struct VkSampleLocationsInfoEXT *sample_locs_info = + vk_find_struct_const(pImageMemoryBarriers[i].pNext, + SAMPLE_LOCATIONS_INFO_EXT); + struct radv_sample_locations_state sample_locations = {}; + + if (sample_locs_info) { + assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT); + sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel; + sample_locations.grid_size = sample_locs_info->sampleLocationGridSize; + sample_locations.count = sample_locs_info->sampleLocationsCount; + typed_memcpy(&sample_locations.locations[0], + sample_locs_info->pSampleLocations, + sample_locs_info->sampleLocationsCount); + } + radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, pImageMemoryBarriers[i].newLayout, pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, - &pImageMemoryBarriers[i].subresourceRange); + &pImageMemoryBarriers[i].subresourceRange, + sample_locs_info ? &sample_locations : NULL); } /* Make sure CP DMA is idle because the driver might have performed a @@ -4808,7 +5281,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer, radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo); - MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18); + MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21); /* Flags that only require a top-of-pipe event. 
*/ VkPipelineStageFlags top_of_pipe_flags = @@ -4918,8 +5391,11 @@ void radv_CmdBeginConditionalRenderingEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer); + struct radeon_cmdbuf *cs = cmd_buffer->cs; bool draw_visible = true; - uint64_t va; + uint64_t pred_value = 0; + uint64_t va, new_va; + unsigned pred_offset; va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset; @@ -4935,13 +5411,51 @@ void radv_CmdBeginConditionalRenderingEXT( si_emit_cache_flush(cmd_buffer); + /* From the Vulkan spec 1.1.107: + * + * "If the 32-bit value at offset in buffer memory is zero, then the + * rendering commands are discarded, otherwise they are executed as + * normal. If the value of the predicate in buffer memory changes while + * conditional rendering is active, the rendering commands may be + * discarded in an implementation-dependent way. Some implementations + * may latch the value of the predicate upon beginning conditional + * rendering while others may read it before every rendering command." + * + * But, the AMD hardware treats the predicate as a 64-bit value which + * means we need a workaround in the driver. Luckily, it's not required + * to support if the value changes when predication is active. + * + * The workaround is as follows: + * 1) allocate a 64-value in the upload BO and initialize it to 0 + * 2) copy the 32-bit predicate value to the upload BO + * 3) use the new allocated VA address for predication + * + * Based on the conditionalrender demo, it's faster to do the COPY_DATA + * in ME (+ sync PFP) instead of PFP. + */ + radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset); + + new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset; + + radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); + radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | + COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | + COPY_DATA_WR_CONFIRM); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, new_va); + radeon_emit(cs, new_va >> 32); + + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); + radeon_emit(cs, 0); + /* Enable predication for this command buffer. */ - si_emit_set_predication_state(cmd_buffer, draw_visible, va); + si_emit_set_predication_state(cmd_buffer, draw_visible, new_va); cmd_buffer->state.predicating = true; /* Store conditional rendering user info. */ cmd_buffer->state.predication_type = draw_visible; - cmd_buffer->state.predication_va = va; + cmd_buffer->state.predication_va = new_va; } void radv_CmdEndConditionalRenderingEXT( @@ -4985,7 +5499,7 @@ void radv_CmdBindTransformFeedbackBuffersEXT( enabled_mask |= 1 << idx; } - cmd_buffer->state.streamout.enabled_mask = enabled_mask; + cmd_buffer->state.streamout.enabled_mask |= enabled_mask; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER; } @@ -5034,7 +5548,7 @@ static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) unsigned reg_strmout_cntl; /* The register is at different places on different ASICs. */ - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL; radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0); } else { @@ -5075,7 +5589,7 @@ void radv_CmdBeginTransformFeedbackEXT( if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) counter_buffer_idx = -1; - /* SI binds streamout buffers as shader resources. 
+ /* AMD GCN binds streamout buffers as shader resources. * VGT only counts primitives and tells the shader through * SGPRs what to do. */
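For reference, a minimal application-side sketch of driving the VK_EXT_sample_locations dynamic state that this patch wires up in radv_CmdSetSampleLocationsEXT(). The structures and the PFN_vkCmdSetSampleLocationsEXT entry point are the standard ones from the extension (resolved with vkGetDeviceProcAddr as usual); the pipeline is assumed to have been created with VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, and the coordinates below are illustrative only, not anything mandated by the patch.

	#include <vulkan/vulkan.h>

	static void
	set_custom_sample_locations(VkCommandBuffer cmd,
	                            PFN_vkCmdSetSampleLocationsEXT set_sample_locations)
	{
		/* One location per sample for 4x MSAA on a 1x1 pixel grid.
		 * sampleLocationsCount must equal
		 * sampleLocationsPerPixel * gridSize.width * gridSize.height.
		 */
		const VkSampleLocationEXT locations[4] = {
			{ 0.375f, 0.125f },
			{ 0.875f, 0.375f },
			{ 0.125f, 0.625f },
			{ 0.625f, 0.875f },
		};

		const VkSampleLocationsInfoEXT info = {
			.sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
			.sampleLocationsPerPixel = VK_SAMPLE_COUNT_4_BIT,
			.sampleLocationGridSize = { 1, 1 },
			.sampleLocationsCount = 4,
			.pSampleLocations = locations,
		};

		/* In RADV this lands in radv_CmdSetSampleLocationsEXT(), which
		 * copies the locations into
		 * cmd_buffer->state.dynamic.sample_location and sets
		 * RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS, so that
		 * radv_emit_sample_locations() programs the
		 * PA_SC_AA_SAMPLE_LOCS_PIXEL_* and PA_SC_CENTROID_PRIORITY_*
		 * registers before the next draw.
		 */
		set_sample_locations(cmd, &info);
	}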