amd/common: use generated register header
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index f9f57a106567b23ad95774efae1918530113cbf2..12e112f95c87cec5a6383b99784a44ee186a5410 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -30,7 +30,6 @@
 #include "radv_shader.h"
 #include "radv_cs.h"
 #include "sid.h"
-#include "gfx9d.h"
 #include "vk_format.h"
 #include "radv_debug.h"
 #include "radv_meta.h"
@@ -57,8 +56,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                         VkImageLayout dst_layout,
                                         uint32_t src_family,
                                         uint32_t dst_family,
-                                        const VkImageSubresourceRange *range,
-                                        VkImageAspectFlags pending_clears);
+                                        const VkImageSubresourceRange *range);
 
 const struct radv_dynamic_state default_dynamic_state = {
        .viewport = {
@@ -106,6 +104,7 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
        dest->viewport.count = src->viewport.count;
        dest->scissor.count = src->scissor.count;
        dest->discard_rectangle.count = src->discard_rectangle.count;
+       dest->sample_location.count = src->sample_location.count;
 
        if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
                if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
@@ -193,13 +192,46 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
                }
        }
 
+       if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
+               if (dest->sample_location.per_pixel != src->sample_location.per_pixel ||
+                   dest->sample_location.grid_size.width != src->sample_location.grid_size.width ||
+                   dest->sample_location.grid_size.height != src->sample_location.grid_size.height ||
+                   memcmp(&dest->sample_location.locations,
+                          &src->sample_location.locations,
+                          src->sample_location.count * sizeof(VkSampleLocationEXT))) {
+                       dest->sample_location.per_pixel = src->sample_location.per_pixel;
+                       dest->sample_location.grid_size = src->sample_location.grid_size;
+                       typed_memcpy(dest->sample_location.locations,
+                                    src->sample_location.locations,
+                                    src->sample_location.count);
+                       dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS;
+               }
+       }
+
        cmd_buffer->state.dirty |= dest_mask;
 }
 
+static void
+radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
+                         struct radv_pipeline *pipeline)
+{
+       struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+       struct radv_shader_info *info;
+
+       if (!pipeline->streamout_shader)
+               return;
+
+       info = &pipeline->streamout_shader->info.info;
+       for (int i = 0; i < MAX_SO_BUFFERS; i++)
+               so->stride_in_dw[i] = info->so.strides[i];
+
+       so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
+}
+
 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
 {
        return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
-              cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+              cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
 }
 
 enum ring_type radv_queue_family_to_ring(int f) {
@@ -285,7 +317,6 @@ radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
 static VkResult
 radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
 {
-
        cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
 
        list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
@@ -310,24 +341,29 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
 
        cmd_buffer->record_result = VK_SUCCESS;
 
+       memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
+
        for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
                cmd_buffer->descriptors[i].dirty = 0;
                cmd_buffer->descriptors[i].valid = 0;
                cmd_buffer->descriptors[i].push_dirty = false;
        }
 
-       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
+           cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
                unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
-               unsigned eop_bug_offset;
+               unsigned fence_offset, eop_bug_offset;
                void *fence_ptr;
 
-               radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
-                                            &cmd_buffer->gfx9_fence_offset,
+               radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
                                             &fence_ptr);
-               cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
+
+               cmd_buffer->gfx9_fence_va =
+                       radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+               cmd_buffer->gfx9_fence_va += fence_offset;
 
                /* Allocate a buffer for the EOP bug on GFX9. */
-               radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
+               radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
                                             &eop_bug_offset, &fence_ptr);
                cmd_buffer->gfx9_eop_bug_va =
                        radv_buffer_get_va(cmd_buffer->upload.upload_bo);
@@ -356,7 +392,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
                                       RADEON_DOMAIN_GTT,
                                       RADEON_FLAG_CPU_ACCESS|
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING |
-                                      RADEON_FLAG_32BIT);
+                                      RADEON_FLAG_32BIT,
+                                      RADV_BO_PRIORITY_UPLOAD_BUFFER);
 
        if (!bo) {
                cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
@@ -397,6 +434,8 @@ radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
                             unsigned *out_offset,
                             void **ptr)
 {
+       assert(util_is_power_of_two_nonzero(alignment));
+
        uint64_t offset = align(cmd_buffer->upload.offset, alignment);
        if (offset + size > cmd_buffer->upload.size) {
                if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
@@ -429,11 +468,15 @@ radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
 }
 
 static void
-radv_emit_write_data_packet(struct radeon_cmdbuf *cs, uint64_t va,
+radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
                            unsigned count, const uint32_t *data)
 {
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+
+       radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
+
        radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
-       radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+       radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
                    S_370_WR_CONFIRM(1) |
                    S_370_ENGINE_SEL(V_370_ME));
        radeon_emit(cs, va);
@@ -451,10 +494,12 @@ void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
        if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
                va += 4;
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
-
        ++cmd_buffer->state.trace_id;
-       radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
+       radv_emit_write_data_packet(cmd_buffer, va, 1,
+                                   &cmd_buffer->state.trace_id);
+
+       radeon_check_space(cmd_buffer->device->ws, cs, 2);
+
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
        radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
 }
@@ -464,22 +509,16 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
                           enum radv_cmd_flush_bits flags)
 {
        if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
-               uint32_t *ptr = NULL;
-               uint64_t va = 0;
-
                assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
 
-               if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
-                       va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) +
-                            cmd_buffer->gfx9_fence_offset;
-                       ptr = &cmd_buffer->gfx9_fence_idx;
-               }
+               radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
 
                /* Force wait for graphics or compute engines to be idle. */
                si_cs_emit_cache_flush(cmd_buffer->cs,
                                       cmd_buffer->device->physical_device->rad_info.chip_class,
-                                      ptr, va,
+                                      &cmd_buffer->gfx9_fence_idx,
+                                      cmd_buffer->gfx9_fence_va,
                                       radv_cmd_buffer_uses_mec(cmd_buffer),
                                       flags, cmd_buffer->gfx9_eop_bug_va);
        }
@@ -493,7 +532,6 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
                   struct radv_pipeline *pipeline, enum ring_type ring)
 {
        struct radv_device *device = cmd_buffer->device;
-       struct radeon_cmdbuf *cs = cmd_buffer->cs;
        uint32_t data[2];
        uint64_t va;
 
@@ -510,13 +548,10 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
                assert(!"invalid ring type");
        }
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
-                                                          cmd_buffer->cs, 6);
-
        data[0] = (uintptr_t)pipeline;
        data[1] = (uintptr_t)pipeline >> 32;
 
-       radv_emit_write_data_packet(cs, va, 2, data);
+       radv_emit_write_data_packet(cmd_buffer, va, 2, data);
 }
 
 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
@@ -540,22 +575,18 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
        struct radv_device *device = cmd_buffer->device;
-       struct radeon_cmdbuf *cs = cmd_buffer->cs;
        uint32_t data[MAX_SETS * 2] = {};
        uint64_t va;
        unsigned i;
        va = radv_buffer_get_va(device->trace_bo) + 24;
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
-                                                          cmd_buffer->cs, 4 + MAX_SETS * 2);
-
        for_each_bit(i, descriptors_state->valid) {
                struct radv_descriptor_set *set = descriptors_state->sets[i];
                data[i * 2] = (uintptr_t)set;
                data[i * 2 + 1] = (uintptr_t)set >> 32;
        }
 
-       radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
+       radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
 }
 
 struct radv_userdata_info *
@@ -578,8 +609,7 @@ radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
        if (loc->sgpr_idx == -1)
                return;
 
-       assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
-       assert(!loc->indirect);
+       assert(loc->num_sgprs == 1);
 
        radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
                                 base_reg + loc->sgpr_idx * 4, va, false);
@@ -608,16 +638,215 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
                struct radv_userdata_info *loc = &locs->descriptor_sets[start];
                unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
 
-               radv_emit_shader_pointer_head(cs, sh_offset, count,
-                                             HAVE_32BIT_POINTERS);
+               radv_emit_shader_pointer_head(cs, sh_offset, count, true);
                for (int i = 0; i < count; i++) {
                        struct radv_descriptor_set *set =
                                descriptors_state->sets[start + i];
 
-                       radv_emit_shader_pointer_body(device, cs, set->va,
-                                                     HAVE_32BIT_POINTERS);
+                       radv_emit_shader_pointer_body(device, cs, set->va, true);
+               }
+       }
+}
+
+/**
+ * Convert the user sample locations to hardware sample locations (the values
+ * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*).
+ */
+static void
+radv_convert_user_sample_locs(struct radv_sample_locations_state *state,
+                             uint32_t x, uint32_t y, VkOffset2D *sample_locs)
+{
+       uint32_t x_offset = x % state->grid_size.width;
+       uint32_t y_offset = y % state->grid_size.height;
+       uint32_t num_samples = (uint32_t)state->per_pixel;
+       VkSampleLocationEXT *user_locs;
+       uint32_t pixel_offset;
+
+       pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples;
+
+       assert(pixel_offset <= MAX_SAMPLE_LOCATIONS);
+       user_locs = &state->locations[pixel_offset];
+
+       for (uint32_t i = 0; i < num_samples; i++) {
+               float shifted_pos_x = user_locs[i].x - 0.5;
+               float shifted_pos_y = user_locs[i].y - 0.5;
+
+               int32_t scaled_pos_x = floor(shifted_pos_x * 16);
+               int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+
+               sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
+               sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
+       }
+}
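
As a quick check of the fixed-point math above: user locations are in [0,1) with the pixel center at (0.5, 0.5), and the hardware takes signed offsets in 1/16th-pixel units clamped to [-8, 7]. A minimal standalone sketch (not part of the patch) that mirrors the conversion:

    #include <math.h>
    #include <stdio.h>

    /* Shift the user coordinate so the pixel center becomes 0, scale to
     * 1/16th-pixel units, then clamp to the 4-bit signed range. */
    static int convert_sample_coord(float user_pos)
    {
        int scaled = (int)floorf((user_pos - 0.5f) * 16.0f);
        return scaled < -8 ? -8 : scaled > 7 ? 7 : scaled;
    }

    int main(void)
    {
        /* (0.75, 0.25) maps to (+4, -4); (1.0, 0.0) clamps to (+7, -8). */
        printf("%d %d\n", convert_sample_coord(0.75f), convert_sample_coord(0.25f));
        printf("%d %d\n", convert_sample_coord(1.0f), convert_sample_coord(0.0f));
        return 0;
    }
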
+
+/**
+ * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample
+ * locations.
+ */
+static void
+radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs,
+                              uint32_t *sample_locs_pixel)
+{
+       for (uint32_t i = 0; i < num_samples; i++) {
+               uint32_t sample_reg_idx = i / 4;
+               uint32_t sample_loc_idx = i % 4;
+               int32_t pos_x = sample_locs[i].x;
+               int32_t pos_y = sample_locs[i].y;
+
+               uint32_t shift_x = 8 * sample_loc_idx;
+               uint32_t shift_y = shift_x + 4;
+
+               sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x;
+               sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y;
+       }
+}
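
With this packing, each sample occupies one byte of a PA_SC_AA_SAMPLE_LOCS_PIXEL_* word: X in the low nibble, Y in the high nibble, both as 4-bit two's complement. A small illustration (not part of the patch) for a hardware location of (4, -4) at sample index 0:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t pos_x = 4, pos_y = -4;        /* sample 0 of this pixel */
        uint32_t shift_x = 8 * 0;             /* sample_loc_idx == 0 */
        uint32_t shift_y = shift_x + 4;
        uint32_t word = 0;

        word |= (pos_x & 0xf) << shift_x;     /* 0x4 in bits 0-3 */
        word |= (pos_y & 0xf) << shift_y;     /* 0xc in bits 4-7 */

        printf("0x%08x\n", word);             /* prints 0x000000c4 */
        return 0;
    }
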
+
+/**
+ * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware
+ * sample locations.
+ */
+static uint64_t
+radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer,
+                              VkOffset2D *sample_locs,
+                              uint32_t num_samples)
+{
+       uint32_t centroid_priorities[num_samples];
+       uint32_t sample_mask = num_samples - 1;
+       uint32_t distances[num_samples];
+       uint64_t centroid_priority = 0;
+
+       /* Compute the distances from center for each sample. */
+       for (int i = 0; i < num_samples; i++) {
+               distances[i] = (sample_locs[i].x * sample_locs[i].x) +
+                              (sample_locs[i].y * sample_locs[i].y);
+       }
+
+       /* Compute the centroid priorities by looking at the distances array. */
+       for (int i = 0; i < num_samples; i++) {
+               uint32_t min_idx = 0;
+
+               for (int j = 1; j < num_samples; j++) {
+                       if (distances[j] < distances[min_idx])
+                               min_idx = j;
                }
+
+               centroid_priorities[i] = min_idx;
+               distances[min_idx] = 0xffffffff;
+       }
+
+       /* Compute the final centroid priority. */
+       for (int i = 0; i < 8; i++) {
+               centroid_priority |=
+                       centroid_priorities[i & sample_mask] << (i * 4);
+       }
+
+       return centroid_priority << 32 | centroid_priority;
+}
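
A standalone sketch of the selection above (not part of the patch), run on a hypothetical 2-sample configuration at hardware locations (4, 4) and (-4, -4): both samples are equidistant from the center, so sample 0 wins the first slot and the priorities alternate 0,1,0,1,... across the eight nibbles.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t locs[2][2] = { { 4, 4 }, { -4, -4 } };
        uint32_t num_samples = 2, sample_mask = num_samples - 1;
        uint32_t distances[2], priorities[2];
        uint64_t centroid_priority = 0;

        for (uint32_t i = 0; i < num_samples; i++)
            distances[i] = locs[i][0] * locs[i][0] + locs[i][1] * locs[i][1];

        /* Repeatedly pick the sample closest to the pixel center. */
        for (uint32_t i = 0; i < num_samples; i++) {
            uint32_t min_idx = 0;
            for (uint32_t j = 1; j < num_samples; j++)
                if (distances[j] < distances[min_idx])
                    min_idx = j;
            priorities[i] = min_idx;
            distances[min_idx] = 0xffffffff;
        }

        /* Replicate the priorities across eight 4-bit fields. */
        for (int i = 0; i < 8; i++)
            centroid_priority |= (uint64_t)priorities[i & sample_mask] << (i * 4);

        /* Prints 1010101010101010: the same pattern lands in both
         * PA_SC_CENTROID_PRIORITY_0/1 halves. */
        printf("%016llx\n",
               (unsigned long long)(centroid_priority << 32 | centroid_priority));
        return 0;
    }
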
+
+/**
+ * Emit the sample locations that are specified with VK_EXT_sample_locations.
+ */
+static void
+radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
+{
+       struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+       struct radv_multisample_state *ms = &pipeline->graphics.ms;
+       struct radv_sample_locations_state *sample_location =
+               &cmd_buffer->state.dynamic.sample_location;
+       uint32_t num_samples = (uint32_t)sample_location->per_pixel;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       uint32_t sample_locs_pixel[4][2] = {};
+       VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
+       uint32_t max_sample_dist = 0;
+       uint64_t centroid_priority;
+
+       if (!cmd_buffer->state.dynamic.sample_location.count)
+               return;
+
+       /* Convert the user sample locations to hardware sample locations. */
+       radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]);
+       radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]);
+       radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]);
+       radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]);
+
+       /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */
+       for (uint32_t i = 0; i < 4; i++) {
+               radv_compute_sample_locs_pixel(num_samples, sample_locs[i],
+                                              sample_locs_pixel[i]);
+       }
+
+       /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */
+       centroid_priority =
+               radv_compute_centroid_priority(cmd_buffer, sample_locs[0],
+                                              num_samples);
+
+       /* Compute the maximum sample distance from the specified locations. */
+       for (uint32_t i = 0; i < num_samples; i++) {
+               VkOffset2D offset = sample_locs[0][i];
+               max_sample_dist = MAX2(max_sample_dist,
+                                      MAX2(abs(offset.x), abs(offset.y)));
        }
+
+       /* Emit the specified user sample locations. */
+       switch (num_samples) {
+       case 2:
+       case 4:
+               radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
+               radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
+               radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
+               radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
+               break;
+       case 8:
+               radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
+               radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
+               radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
+               radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
+               radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]);
+               radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]);
+               radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]);
+               radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]);
+               break;
+       default:
+               unreachable("invalid number of samples");
+       }
+
+       /* Emit the maximum sample distance and the centroid priority. */
+       uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
+
+       pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
+       pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
+
+       radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
+       radeon_emit(cs, pa_sc_aa_config);
+
+       radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+       radeon_emit(cs, centroid_priority);
+       radeon_emit(cs, centroid_priority >> 32);
+
+       /* GFX9: Flush DFSM when the AA mode changes. */
+       if (cmd_buffer->device->dfsm_allowed) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
+       }
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
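
For context, the state consumed above is supplied by the application through VK_EXT_sample_locations. A minimal usage sketch, assuming a command buffer recording with VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT enabled (the entry point is normally resolved via vkGetDeviceProcAddr):

    #include <vulkan/vulkan.h>

    static void set_standard_4x_locations(VkCommandBuffer cmd)
    {
        /* The standard 4x pattern, expressed as dynamic sample locations. */
        VkSampleLocationEXT locations[4] = {
            { 0.375f, 0.125f }, { 0.875f, 0.375f },
            { 0.125f, 0.625f }, { 0.625f, 0.875f },
        };
        VkSampleLocationsInfoEXT info = {
            .sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
            .sampleLocationsPerPixel = VK_SAMPLE_COUNT_4_BIT,
            .sampleLocationGridSize = { 1, 1 },
            .sampleLocationsCount = 4,
            .pSampleLocations = locations,
        };
        vkCmdSetSampleLocationsEXT(cmd, &info);
    }
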
+
+static void
+radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
+                            struct radv_pipeline *pipeline,
+                            gl_shader_stage stage,
+                            int idx, int count, uint32_t *values)
+{
+       struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
+       uint32_t base_reg = pipeline->user_data_0[stage];
+       if (loc->sgpr_idx == -1)
+               return;
+
+       assert(loc->num_sgprs == count);
+
+       radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
+       radeon_emit_array(cmd_buffer->cs, values, count);
 }
 
 static void
@@ -640,13 +869,15 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
 
        radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
 
-       radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);
+       radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
 
        /* GFX9: Flush DFSM when the AA mode changes. */
        if (cmd_buffer->device->dfsm_allowed) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
        }
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
 static void
@@ -843,10 +1074,13 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
                sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
                sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
        }
+       /* TODO: avoid redundantly setting context registers */
        radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
        radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
        radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
        radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
 static void
@@ -870,6 +1104,15 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
 
        radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
 
+       if (!cmd_buffer->state.emitted_pipeline ||
+           cmd_buffer->state.emitted_pipeline->ctx_cs.cdw != pipeline->ctx_cs.cdw ||
+           cmd_buffer->state.emitted_pipeline->ctx_cs_hash != pipeline->ctx_cs_hash ||
+           memcmp(cmd_buffer->state.emitted_pipeline->ctx_cs.buf,
+                  pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw * 4)) {
+               radeon_emit_array(cmd_buffer->cs, pipeline->ctx_cs.buf, pipeline->ctx_cs.cdw);
+               cmd_buffer->state.context_roll_without_scissor_emitted = true;
+       }
+
        for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
                if (!pipeline->shaders[i])
                        continue;
@@ -906,6 +1149,8 @@ radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
                          cmd_buffer->state.dynamic.scissor.scissors,
                          cmd_buffer->state.dynamic.viewport.viewports,
                          cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = false;
 }
 
 static void
@@ -996,7 +1241,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
                         struct radv_image *image,
                         VkImageLayout layout)
 {
-       bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
+       bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
        struct radv_color_buffer_info *cb = &att->cb;
        uint32_t cb_color_info = cb->cb_color_info;
 
@@ -1026,7 +1271,7 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
                radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
                
                radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
-                                      S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
+                                      cb->cb_mrt_epitch);
        } else {
                radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
                radeon_emit(cmd_buffer->cs, cb->cb_color_base);
@@ -1045,13 +1290,18 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
                        radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
                }
        }
+
+       if (radv_image_has_dcc(image)) {
+               /* Drawing with DCC enabled also compresses colorbuffers. */
+               radv_update_dcc_metadata(cmd_buffer, image, true);
+       }
 }
 
 static void
 radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
                             struct radv_ds_buffer_info *ds,
                             struct radv_image *image, VkImageLayout layout,
-                            bool requires_cond_write)
+                            bool requires_cond_exec)
 {
        uint32_t db_z_info = ds->db_z_info;
        uint32_t db_z_info_reg;
@@ -1075,38 +1325,21 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
        }
 
        /* When we don't know the last fast clear value we need to emit a
-        * conditional packet, otherwise we can update DB_Z_INFO directly.
+        * conditional packet that will eventually skip the following
+        * SET_CONTEXT_REG packet.
         */
-       if (requires_cond_write) {
-               radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));
-
-               const uint32_t write_space = 0 << 8;    /* register */
-               const uint32_t poll_space = 1 << 4;     /* memory */
-               const uint32_t function = 3 << 0;       /* equal to the reference */
-               const uint32_t options = write_space | poll_space | function;
-               radeon_emit(cmd_buffer->cs, options);
-
-               /* poll address - location of the depth clear value */
+       if (requires_cond_exec) {
                uint64_t va = radv_buffer_get_va(image->bo);
-               va += image->offset + image->clear_value_offset;
-
-               /* In presence of stencil format, we have to adjust the base
-                * address because the first value is the stencil clear value.
-                */
-               if (vk_format_is_stencil(image->vk_format))
-                       va += 4;
+               va += image->offset + image->tc_compat_zrange_offset;
 
+               radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
                radeon_emit(cmd_buffer->cs, va);
                radeon_emit(cmd_buffer->cs, va >> 32);
-
-               radeon_emit(cmd_buffer->cs, fui(0.0f));          /* reference value */
-               radeon_emit(cmd_buffer->cs, (uint32_t)-1);       /* comparison mask */
-               radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
-               radeon_emit(cmd_buffer->cs, 0u);                 /* write address high */
-               radeon_emit(cmd_buffer->cs, db_z_info);
-       } else {
-               radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
+               radeon_emit(cmd_buffer->cs, 0);
+               radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
        }
+
+       radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
 }
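
A hedged reading of the mechanism above, assuming the usual PM4 COND_EXEC behaviour (the CP reads a dword at the given address and skips the following exec-count dwords when it is zero): the trailing 3 is exactly the size of the SET_CONTEXT_REG packet that follows (header, register offset, one value). Annotated dword stream, names as in the patch:

    /* PKT3(PKT3_COND_EXEC, 3, 0)
     * va           low 32 bits of the tc_compat_zrange metadata address
     * va >> 32     high 32 bits
     * 0            reserved
     * 3            dwords to skip when the metadata dword reads zero
     * PKT3(PKT3_SET_CONTEXT_REG, 1, 0)   <- executed only when the last
     * register offset                       depth clear value was 0.0f
     * db_z_info                             (metadata == UINT_MAX)
     */
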
 
 static void
@@ -1193,10 +1426,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
        if (!framebuffer || !subpass)
                return;
 
-       att_idx = subpass->depth_stencil_attachment.attachment;
-       if (att_idx == VK_ATTACHMENT_UNUSED)
+       if (!subpass->depth_stencil_attachment)
                return;
 
+       att_idx = subpass->depth_stencil_attachment->attachment;
        att = &framebuffer->attachments[att_idx];
        if (att->attachment->image != image)
                return;
@@ -1210,11 +1443,13 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
         */
        if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
            ds_clear_value.depth == 0.0) {
-               VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+               VkImageLayout layout = subpass->depth_stencil_attachment->layout;
 
                radv_update_zrange_precision(cmd_buffer, &att->ds, image,
                                             layout, false);
        }
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
 /**
@@ -1241,8 +1476,8 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                ++reg_count;
 
-       radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
-       radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+       radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating));
+       radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
                        S_370_WR_CONFIRM(1) |
                        S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cs, va);
@@ -1253,6 +1488,44 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
                radeon_emit(cs, fui(ds_clear_value.depth));
 }
 
+/**
+ * Update the TC-compat metadata value for this image.
+ */
+static void
+radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
+                                  struct radv_image *image,
+                                  uint32_t value)
+{
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       uint64_t va = radv_buffer_get_va(image->bo);
+       va += image->offset + image->tc_compat_zrange_offset;
+
+       radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
+       radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
+                       S_370_WR_CONFIRM(1) |
+                       S_370_ENGINE_SEL(V_370_PFP));
+       radeon_emit(cs, va);
+       radeon_emit(cs, va >> 32);
+       radeon_emit(cs, value);
+}
+
+static void
+radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
+                                     struct radv_image *image,
+                                     VkClearDepthStencilValue ds_clear_value)
+{
+       uint64_t va = radv_buffer_get_va(image->bo);
+       va += image->offset + image->tc_compat_zrange_offset;
+       uint32_t cond_val;
+
+       /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
+        * depth clear value is 0.0f.
+        */
+       cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
+
+       radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
+}
+
 /**
  * Update the clear depth/stencil values for this image.
  */
@@ -1266,6 +1539,12 @@ radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
 
        radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
 
+       if (radv_image_is_tc_compat_htile(image) &&
+           (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
+               radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
+                                                     ds_clear_value);
+       }
+
        radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
                                        aspects);
 }
@@ -1296,17 +1575,27 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                ++reg_count;
 
-       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
-       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
-                       COPY_DATA_DST_SEL(COPY_DATA_REG) |
-                       (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
-       radeon_emit(cs, va);
-       radeon_emit(cs, va >> 32);
-       radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
-       radeon_emit(cs, 0);
+       uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
 
-       radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
-       radeon_emit(cs, 0);
+       if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
+               radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+               radeon_emit(cs, va);
+               radeon_emit(cs, va >> 32);
+               radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
+               radeon_emit(cs, reg_count);
+       } else {
+               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+                               COPY_DATA_DST_SEL(COPY_DATA_REG) |
+                               (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
+               radeon_emit(cs, va);
+               radeon_emit(cs, va >> 32);
+               radeon_emit(cs, reg >> 2);
+               radeon_emit(cs, 0);
+
+               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+               radeon_emit(cs, 0);
+       }
 }
 
 /*
@@ -1315,9 +1604,31 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
  * cmask eliminate is required.
  */
 void
-radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
-                                 struct radv_image *image,
-                                 bool value)
+radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
+                        struct radv_image *image, bool value)
+{
+       uint64_t pred_val = value;
+       uint64_t va = radv_buffer_get_va(image->bo);
+       va += image->offset + image->fce_pred_offset;
+
+       assert(radv_image_has_dcc(image));
+
+       radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+       radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
+                                   S_370_WR_CONFIRM(1) |
+                                   S_370_ENGINE_SEL(V_370_PFP));
+       radeon_emit(cmd_buffer->cs, va);
+       radeon_emit(cmd_buffer->cs, va >> 32);
+       radeon_emit(cmd_buffer->cs, pred_val);
+       radeon_emit(cmd_buffer->cs, pred_val >> 32);
+}
+
+/**
+ * Update the DCC predicate to reflect the compression state.
+ */
+void
+radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
+                        struct radv_image *image, bool value)
 {
        uint64_t pred_val = value;
        uint64_t va = radv_buffer_get_va(image->bo);
@@ -1326,7 +1637,7 @@ radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
        assert(radv_image_has_dcc(image));
 
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
-       radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+       radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
                                    S_370_WR_CONFIRM(1) |
                                    S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cmd_buffer->cs, va);
@@ -1364,6 +1675,8 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
        radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
        radeon_emit(cs, color_values[0]);
        radeon_emit(cs, color_values[1]);
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
 /**
@@ -1381,8 +1694,8 @@ radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
 
        assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
 
-       radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
-       radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+       radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, cmd_buffer->state.predicating));
+       radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
                        S_370_WR_CONFIRM(1) |
                        S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cs, va);
@@ -1426,17 +1739,25 @@ radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
 
        uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
 
-       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
-       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
-                       COPY_DATA_DST_SEL(COPY_DATA_REG) |
-                       COPY_DATA_COUNT_SEL);
-       radeon_emit(cs, va);
-       radeon_emit(cs, va >> 32);
-       radeon_emit(cs, reg >> 2);
-       radeon_emit(cs, 0);
+       if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
+               radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+               radeon_emit(cs, va);
+               radeon_emit(cs, va >> 32);
+               radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
+               radeon_emit(cs, 2);
+       } else {
+               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
+               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+                               COPY_DATA_DST_SEL(COPY_DATA_REG) |
+                               COPY_DATA_COUNT_SEL);
+               radeon_emit(cs, va);
+               radeon_emit(cs, va >> 32);
+               radeon_emit(cs, reg >> 2);
+               radeon_emit(cs, 0);
 
-       radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
-       radeon_emit(cs, 0);
+               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+               radeon_emit(cs, 0);
+       }
 }
 
 static void
@@ -1445,6 +1766,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
        int i;
        struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
        const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+       unsigned num_bpp64_colorbufs = 0;
 
        /* this may happen for inherited secondary recording */
        if (!framebuffer)
@@ -1464,15 +1786,19 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
 
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
 
-               assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
+               assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
+                                                      VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
                radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
 
                radv_load_color_clear_metadata(cmd_buffer, image, i);
+
+               if (image->planes[0].surface.bpe >= 8)
+                       num_bpp64_colorbufs++;
        }
 
-       if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
-               int idx = subpass->depth_stencil_attachment.attachment;
-               VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+       if (subpass->depth_stencil_attachment) {
+               int idx = subpass->depth_stencil_attachment->attachment;
+               VkImageLayout layout = subpass->depth_stencil_attachment->layout;
                struct radv_attachment_info *att = &framebuffer->attachments[idx];
                struct radv_image *image = att->attachment->image;
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
@@ -1503,6 +1829,23 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
                               S_028208_BR_X(framebuffer->width) |
                               S_028208_BR_Y(framebuffer->height));
 
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
+               uint8_t watermark = 4; /* Default value for GFX8. */
+
+               /* For optimal DCC performance. */
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+                       if (num_bpp64_colorbufs >= 5) {
+                               watermark = 8;
+                       } else {
+                               watermark = 6;
+                       }
+               }
+
+               radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
+                                      S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
+                                      S_028424_OVERWRITE_COMBINER_WATERMARK(watermark));
+       }
+
        if (cmd_buffer->device->dfsm_allowed) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
@@ -1548,7 +1891,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
        uint32_t db_count_control;
 
        if(!cmd_buffer->state.active_occlusion_queries) {
-               if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
                        if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
                            pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
                            has_perfect_queries) {
@@ -1567,7 +1910,7 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
                const struct radv_subpass *subpass = cmd_buffer->state.subpass;
                uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
 
-               if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
                        db_count_control =
                                S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
                                S_028004_SAMPLE_RATE(sample_rate) |
@@ -1596,6 +1939,8 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
        }
 
        radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
 }
 
 static void
@@ -1630,6 +1975,9 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
        if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
                radv_emit_discard_rectangle(cmd_buffer);
 
+       if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
+               radv_emit_sample_locations(cmd_buffer);
+
        cmd_buffer->state.dirty &= ~states;
 }
 
@@ -1657,7 +2005,7 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
 {
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
-       uint32_t size = MAX_SETS * 2 * 4;
+       uint32_t size = MAX_SETS * 4;
        uint32_t offset;
        void *ptr;
        
@@ -1666,13 +2014,12 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
                return;
 
        for (unsigned i = 0; i < MAX_SETS; i++) {
-               uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
+               uint32_t *uptr = ((uint32_t *)ptr) + i;
                uint64_t set_va = 0;
                struct radv_descriptor_set *set = descriptors_state->sets[i];
                if (descriptors_state->valid & (1u << i))
                        set_va = set->va;
                uptr[0] = set_va & 0xffffffff;
-               uptr[1] = set_va >> 32;
        }
 
        uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
@@ -1714,6 +2061,8 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
                                         VK_PIPELINE_BIND_POINT_GRAPHICS;
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       bool flush_indirect_descriptors;
 
        if (!descriptors_state->dirty)
                return;
@@ -1721,10 +2070,14 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
        if (descriptors_state->push_dirty)
                radv_flush_push_descriptors(cmd_buffer, bind_point);
 
-       if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
-           (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
+       flush_indirect_descriptors =
+               (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
+                state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
+               (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
+                state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
+
+       if (flush_indirect_descriptors)
                radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
-       }
 
        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                           cmd_buffer->cs,
@@ -1752,10 +2105,10 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
        descriptors_state->dirty = 0;
        descriptors_state->push_dirty = false;
 
+       assert(cmd_buffer->cs->cdw <= cdw_max);
+
        if (unlikely(cmd_buffer->device->trace_bo))
                radv_save_descriptors(cmd_buffer, bind_point);
-
-       assert(cmd_buffer->cs->cdw <= cdw_max);
 }
 
 static void
@@ -1772,6 +2125,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
                radv_get_descriptors_state(cmd_buffer, bind_point);
        struct radv_pipeline_layout *layout = pipeline->layout;
        struct radv_shader_variant *shader, *prev_shader;
+       bool need_push_constants = false;
        unsigned offset;
        void *ptr;
        uint64_t va;
@@ -1781,37 +2135,56 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
            (!layout->push_constant_size && !layout->dynamic_offset_count))
                return;
 
-       if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
-                                         16 * layout->dynamic_offset_count,
-                                         256, &offset, &ptr))
-               return;
+       radv_foreach_stage(stage, stages) {
+               if (!pipeline->shaders[stage])
+                       continue;
 
-       memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
-       memcpy((char*)ptr + layout->push_constant_size,
-              descriptors_state->dynamic_buffers,
-              16 * layout->dynamic_offset_count);
+               need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants;
+               need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets;
 
-       va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
-       va += offset;
+               uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts;
+               uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts;
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
-                                                          cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+               radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
+                                            AC_UD_INLINE_PUSH_CONSTANTS,
+                                            count,
+                                            (uint32_t *)&cmd_buffer->push_constants[base * 4]);
+       }
 
-       prev_shader = NULL;
-       radv_foreach_stage(stage, stages) {
-               shader = radv_get_shader(pipeline, stage);
+       if (need_push_constants) {
+               if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
+                                                 16 * layout->dynamic_offset_count,
+                                                 256, &offset, &ptr))
+                       return;
+
+               memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
+               memcpy((char*)ptr + layout->push_constant_size,
+                      descriptors_state->dynamic_buffers,
+                      16 * layout->dynamic_offset_count);
+
+               va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+               va += offset;
+
+               MAYBE_UNUSED unsigned cdw_max =
+                       radeon_check_space(cmd_buffer->device->ws,
+                                          cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+
+               prev_shader = NULL;
+               radv_foreach_stage(stage, stages) {
+                       shader = radv_get_shader(pipeline, stage);
 
-               /* Avoid redundantly emitting the address for merged stages. */
-               if (shader && shader != prev_shader) {
-                       radv_emit_userdata_address(cmd_buffer, pipeline, stage,
-                                                  AC_UD_PUSH_CONSTANTS, va);
+                       /* Avoid redundantly emitting the address for merged stages. */
+                       if (shader && shader != prev_shader) {
+                               radv_emit_userdata_address(cmd_buffer, pipeline, stage,
+                                                          AC_UD_PUSH_CONSTANTS, va);
 
-                       prev_shader = shader;
+                               prev_shader = shader;
+                       }
                }
+               assert(cmd_buffer->cs->cdw <= cdw_max);
        }
 
        cmd_buffer->push_constant_stages &= ~stages;
-       assert(cmd_buffer->cs->cdw <= cdw_max);
 }
 
 static void
@@ -1820,13 +2193,13 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
 {
        if ((pipeline_is_dirty ||
            (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
-           cmd_buffer->state.pipeline->vertex_elements.count &&
+           cmd_buffer->state.pipeline->num_vertex_bindings &&
            radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
                struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
                unsigned vb_offset;
                void *vb_ptr;
                uint32_t i = 0;
-               uint32_t count = velems->count;
+               uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
                uint64_t va;
 
                /* allocate some descriptor state for vertex buffers */
@@ -1837,21 +2210,28 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
                for (i = 0; i < count; i++) {
                        uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
                        uint32_t offset;
-                       int vb = velems->binding[i];
-                       struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
-                       uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
+                       struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
+                       uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+
+                       if (!buffer)
+                               continue;
 
                        va = radv_buffer_get_va(buffer->bo);
 
-                       offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
+                       offset = cmd_buffer->vertex_bindings[i].offset;
                        va += offset + buffer->offset;
                        desc[0] = va;
                        desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
-                       if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
+                       if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
                                desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
                        else
                                desc[2] = buffer->size - offset;
-                       desc[3] = velems->rsrc_word3[i];
+                       desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+                                 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+                                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+                                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+                                 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
+                                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
                }
 
                va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
@@ -1868,35 +2248,173 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
 }
 
 static void
-radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
+radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
 {
-       radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
-       radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
-       radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+       struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+       struct radv_userdata_info *loc;
+       uint32_t base_reg;
+
+       for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
+               if (!radv_get_shader(pipeline, stage))
+                       continue;
+
+               loc = radv_lookup_user_sgpr(pipeline, stage,
+                                           AC_UD_STREAMOUT_BUFFERS);
+               if (loc->sgpr_idx == -1)
+                       continue;
+
+               base_reg = pipeline->user_data_0[stage];
+
+               radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+                                        base_reg + loc->sgpr_idx * 4, va, false);
+       }
+
+       if (pipeline->gs_copy_shader) {
+               loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
+               if (loc->sgpr_idx != -1) {
+                       base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
+
+                       radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+                                                base_reg + loc->sgpr_idx * 4, va, false);
+               }
+       }
 }
 
 static void
-radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
-                        bool instanced_draw, bool indirect_draw,
-                        uint32_t draw_vertex_count)
+radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
 {
-       struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
-       struct radv_cmd_state *state = &cmd_buffer->state;
-       struct radeon_cmdbuf *cs = cmd_buffer->cs;
-       uint32_t ia_multi_vgt_param;
+       if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
+               struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+               struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+               unsigned so_offset;
+               void *so_ptr;
+               uint64_t va;
+
+               /* Allocate some descriptor state for streamout buffers. */
+               if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
+                                                 MAX_SO_BUFFERS * 16, 256,
+                                                 &so_offset, &so_ptr))
+                       return;
+
+               for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
+                       struct radv_buffer *buffer = sb[i].buffer;
+                       uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
+
+                       if (!(so->enabled_mask & (1 << i)))
+                               continue;
+
+                       va = radv_buffer_get_va(buffer->bo) + buffer->offset;
+
+                       va += sb[i].offset;
+
+                       /* Set the descriptor.
+                        *
+                        * On GFX8, the format must be non-INVALID, otherwise
+                        * the buffer will be considered not bound and store
+                        * instructions will be no-ops.
+                        */
+                       desc[0] = va;
+                       desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
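+                       /* Leave the buffer range unbounded; streamout stores
+                        * are bounded by the VGT_STRMOUT_BUFFER_SIZE
+                        * registers.
+                        */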
+                       desc[2] = 0xffffffff;
+                       desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+                                 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+                                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+                                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+                                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+               }
+
+               va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+               va += so_offset;
+
+               radv_emit_streamout_buffers(cmd_buffer, va);
+       }
+
+       cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
+}
+
+static void
+radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
+{
+       radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
+       radv_flush_streamout_descriptors(cmd_buffer);
+       radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+       radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+}
+
+struct radv_draw_info {
+       /**
+        * Number of vertices.
+        */
+       uint32_t count;
+
+       /**
+        * Index of the first vertex.
+        */
+       int32_t vertex_offset;
+
+       /**
+        * First instance id.
+        */
+       uint32_t first_instance;
+
+       /**
+        * Number of instances.
+        */
+       uint32_t instance_count;
+
+       /**
+        * First index (indexed draws only).
+        */
+       uint32_t first_index;
+
+       /**
+        * Whether it's an indexed draw.
+        */
+       bool indexed;
+
+       /**
+        * Indirect draw parameters resource.
+        */
+       struct radv_buffer *indirect;
+       uint64_t indirect_offset;
+       uint32_t stride;
+
+       /**
+        * Draw count parameters resource.
+        */
+       struct radv_buffer *count_buffer;
+       uint64_t count_buffer_offset;
+
+       /**
+        * Stream output parameters resource.
+        */
+       struct radv_buffer *strmout_buffer;
+       uint64_t strmout_buffer_offset;
+};
+
+static void
+radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
+                        const struct radv_draw_info *draw_info)
+{
+       struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       uint32_t ia_multi_vgt_param;
        int32_t primitive_reset_en;
 
        /* Draw state. */
        ia_multi_vgt_param =
-               si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
-                                         indirect_draw, draw_vertex_count);
+               si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
+                                         draw_info->indirect,
+                                         !!draw_info->strmout_buffer,
+                                         draw_info->indirect ? 0 : draw_info->count);
 
        if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
                if (info->chip_class >= GFX9) {
                        radeon_set_uconfig_reg_idx(cs,
                                                   R_030960_IA_MULTI_VGT_PARAM,
                                                   4, ia_multi_vgt_param);
-               } else if (info->chip_class >= CIK) {
+               } else if (info->chip_class >= GFX7) {
                        radeon_set_context_reg_idx(cs,
                                                   R_028AA8_IA_MULTI_VGT_PARAM,
                                                   1, ia_multi_vgt_param);
@@ -1909,7 +2427,7 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
 
        /* Primitive restart. */
        primitive_reset_en =
-               indexed_draw && state->pipeline->graphics.prim_restart_enable;
+               draw_info->indexed && state->pipeline->graphics.prim_restart_enable;
 
        if (primitive_reset_en != state->last_primitive_reset_en) {
                state->last_primitive_reset_en = primitive_reset_en;
@@ -1935,6 +2453,27 @@ radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
                        state->last_primitive_reset_index = primitive_reset_index;
                }
        }
+
+       if (draw_info->strmout_buffer) {
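+               /* Draws that take their vertex count from a streamout counter
+                * (vkCmdDrawIndirectByteCountEXT) load the number of bytes
+                * written into the VGT opaque draw registers; the VGT derives
+                * the vertex count from the filled size and the vertex stride.
+                */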
+               uint64_t va = radv_buffer_get_va(draw_info->strmout_buffer->bo);
+
+               va += draw_info->strmout_buffer->offset +
+                     draw_info->strmout_buffer_offset;
+
+               radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+                                      draw_info->stride);
+
+               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+                               COPY_DATA_DST_SEL(COPY_DATA_REG) |
+                               COPY_DATA_WR_CONFIRM);
+               radeon_emit(cs, va);
+               radeon_emit(cs, va >> 32);
+               radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
+               radeon_emit(cs, 0); /* unused */
+
+               radv_cs_add_buffer(cmd_buffer->device->ws, cs, draw_info->strmout_buffer->bo);
+       }
 }
 
 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
@@ -1961,7 +2500,8 @@ static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
                                     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                                     VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                                     VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
-                                    VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
+                                    VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+                                    VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
        }
 }
@@ -1985,6 +2525,8 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
        for_each_bit(b, src_flags) {
                switch ((VkAccessFlagBits)(1 << b)) {
                case VK_ACCESS_SHADER_WRITE_BIT:
+               case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
+               case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
                        flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
                        break;
                case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
@@ -2019,34 +2561,69 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
                       VkAccessFlags dst_flags,
                       struct radv_image *image)
 {
+       bool flush_CB_meta = true, flush_DB_meta = true;
        enum radv_cmd_flush_bits flush_bits = 0;
+       bool flush_CB = true, flush_DB = true;
+       bool image_is_coherent = false;
        uint32_t b;
+
+       if (image) {
+               if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+                       flush_CB = false;
+                       flush_DB = false;
+               }
+
+               if (!radv_image_has_CB_metadata(image))
+                       flush_CB_meta = false;
+               if (!radv_image_has_htile(image))
+                       flush_DB_meta = false;
+
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+                       if (image->info.samples == 1 &&
+                           (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+                                            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
+                           !vk_format_is_stencil(image->vk_format)) {
+                               /* Single-sample color and single-sample depth
+                                * (not stencil) are coherent with shaders on
+                                * GFX9.
+                                */
+                               image_is_coherent = true;
+                       }
+               }
+       }
+
        for_each_bit(b, dst_flags) {
                switch ((VkAccessFlagBits)(1 << b)) {
                case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
                case VK_ACCESS_INDEX_READ_BIT:
+               case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
                        break;
                case VK_ACCESS_UNIFORM_READ_BIT:
                        flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
                        break;
                case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
-               case VK_ACCESS_SHADER_READ_BIT:
                case VK_ACCESS_TRANSFER_READ_BIT:
                case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
                        flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
                                      RADV_CMD_FLAG_INV_GLOBAL_L2;
                        break;
+               case VK_ACCESS_SHADER_READ_BIT:
+                       flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
+
+                       if (!image_is_coherent)
+                               flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
+                       break;
                case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
-                       /* TODO: change to image && when the image gets passed
-                        * through from the subpass. */
-                       if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
-                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
-                                             RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+                       if (flush_CB)
+                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+                       if (flush_CB_meta)
+                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
                        break;
                case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
-                       if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
-                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
-                                             RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+                       if (flush_DB)
+                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+                       if (flush_DB_meta)
+                               flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
                        break;
                default:
                        break;
@@ -2077,11 +2654,21 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf
        range.baseArrayLayer = view->base_layer;
        range.layerCount = cmd_buffer->state.framebuffer->layers;
 
+       if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+               /* If the current subpass uses multiview, the driver might have
+                * performed a fast color/depth clear to the whole image
+                * (including all layers). To make sure the driver will
+                * decompress the image correctly (if needed), we have to
+                * account for the "real" number of layers. If the view mask is
+                * sparse, this will decompress more layers than needed.
+                */
+               range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
+       }
+
        radv_handle_image_transition(cmd_buffer,
                                     view->image,
                                     cmd_buffer->state.attachments[idx].current_layout,
-                                    att.layout, 0, 0, &range,
-                                    cmd_buffer->state.attachments[idx].pending_clear_aspects);
+                                    att.layout, 0, 0, &range);
 
        cmd_buffer->state.attachments[idx].current_layout = att.layout;
 
@@ -2090,28 +2677,8 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf
 
 void
 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
-                           const struct radv_subpass *subpass, bool transitions)
+                           const struct radv_subpass *subpass)
 {
-       if (transitions) {
-               radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
-
-               for (unsigned i = 0; i < subpass->color_count; ++i) {
-                       if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
-                               radv_handle_subpass_image_transition(cmd_buffer,
-                                                                    subpass->color_attachments[i]);
-               }
-
-               for (unsigned i = 0; i < subpass->input_count; ++i) {
-                       radv_handle_subpass_image_transition(cmd_buffer,
-                                                       subpass->input_attachments[i]);
-               }
-
-               if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
-                       radv_handle_subpass_image_transition(cmd_buffer,
-                                                       subpass->depth_stencil_attachment);
-               }
-       }
-
        cmd_buffer->state.subpass = subpass;
 
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
@@ -2256,20 +2823,6 @@ VkResult radv_ResetCommandBuffer(
        return radv_reset_cmd_buffer(cmd_buffer);
 }
 
-static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
-{
-       struct radv_device *device = cmd_buffer->device;
-       if (device->gfx_init) {
-               uint64_t va = radv_buffer_get_va(device->gfx_init);
-               radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init);
-               radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
-               radeon_emit(cmd_buffer->cs, va);
-               radeon_emit(cmd_buffer->cs, va >> 32);
-               radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
-       } else
-               si_init_config(cmd_buffer);
-}
-
 VkResult radv_BeginCommandBuffer(
        VkCommandBuffer commandBuffer,
        const VkCommandBufferBeginInfo *pBeginInfo)
@@ -2292,23 +2845,9 @@ VkResult radv_BeginCommandBuffer(
        cmd_buffer->state.last_num_instances = -1;
        cmd_buffer->state.last_vertex_offset = -1;
        cmd_buffer->state.last_first_instance = -1;
+       cmd_buffer->state.predication_type = -1;
        cmd_buffer->usage_flags = pBeginInfo->flags;
 
-       /* setup initial configuration into command buffer */
-       if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
-               switch (cmd_buffer->queue_family_index) {
-               case RADV_QUEUE_GENERAL:
-                       emit_gfx_buffer_state(cmd_buffer);
-                       break;
-               case RADV_QUEUE_COMPUTE:
-                       si_init_compute(cmd_buffer);
-                       break;
-               case RADV_QUEUE_TRANSFER:
-               default:
-                       break;
-               }
-       }
-
        if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
            (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                assert(pBeginInfo->pInheritanceInfo);
@@ -2322,7 +2861,7 @@ VkResult radv_BeginCommandBuffer(
                if (result != VK_SUCCESS)
                        return result;
 
-               radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
+               radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
        }
 
        if (unlikely(cmd_buffer->device->trace_bo)) {
@@ -2568,7 +3107,7 @@ void radv_CmdPushDescriptorSetKHR(
 
 void radv_CmdPushDescriptorSetWithTemplateKHR(
        VkCommandBuffer                             commandBuffer,
-       VkDescriptorUpdateTemplateKHR               descriptorUpdateTemplate,
+       VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
        VkPipelineLayout                            _layout,
        uint32_t                                    set,
        const void*                                 pData)
@@ -2612,8 +3151,14 @@ VkResult radv_EndCommandBuffer(
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
 
        if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
-               if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
+               if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
                        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+
+               /* Make sure to sync all pending active queries at the end of
+                * command buffer.
+                */
+               cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
                si_emit_cache_flush(cmd_buffer);
        }
 
@@ -2640,6 +3185,8 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
        if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
                return;
 
+       assert(!pipeline->ctx_cs.cdw);
+
        cmd_buffer->state.emitted_compute_pipeline = pipeline;
 
        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
@@ -2702,6 +3249,7 @@ void radv_CmdBindPipeline(
                cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
 
                radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
+               radv_bind_streamout_state(cmd_buffer, pipeline);
 
                if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
                        cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
@@ -2730,6 +3278,11 @@ void radv_CmdSetViewport(
        assert(firstViewport < MAX_VIEWPORTS);
        assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
 
+       if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
+                   pViewports, viewportCount * sizeof(*pViewports))) {
+               return;
+       }
+
        memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
               viewportCount * sizeof(*pViewports));
 
@@ -2749,6 +3302,11 @@ void radv_CmdSetScissor(
        assert(firstScissor < MAX_SCISSORS);
        assert(total_count >= 1 && total_count <= MAX_SCISSORS);
 
+       if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
+                   scissorCount * sizeof(*pScissors))) {
+               return;
+       }
+
        memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
               scissorCount * sizeof(*pScissors));
 
@@ -2760,6 +3318,10 @@ void radv_CmdSetLineWidth(
        float                                       lineWidth)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+       if (cmd_buffer->state.dynamic.line_width == lineWidth)
+               return;
+
        cmd_buffer->state.dynamic.line_width = lineWidth;
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
 }
@@ -2771,12 +3333,19 @@ void radv_CmdSetDepthBias(
        float                                       depthBiasSlopeFactor)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+
+       if (state->dynamic.depth_bias.bias == depthBiasConstantFactor &&
+           state->dynamic.depth_bias.clamp == depthBiasClamp &&
+           state->dynamic.depth_bias.slope == depthBiasSlopeFactor) {
+               return;
+       }
 
-       cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
-       cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
-       cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
+       state->dynamic.depth_bias.bias = depthBiasConstantFactor;
+       state->dynamic.depth_bias.clamp = depthBiasClamp;
+       state->dynamic.depth_bias.slope = depthBiasSlopeFactor;
 
-       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
 }
 
 void radv_CmdSetBlendConstants(
@@ -2784,11 +3353,14 @@ void radv_CmdSetBlendConstants(
        const float                                 blendConstants[4])
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+
+       if (!memcmp(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4))
+               return;
 
-       memcpy(cmd_buffer->state.dynamic.blend_constants,
-              blendConstants, sizeof(float) * 4);
+       memcpy(state->dynamic.blend_constants, blendConstants, sizeof(float) * 4);
 
-       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
 }
 
 void radv_CmdSetDepthBounds(
@@ -2797,11 +3369,17 @@ void radv_CmdSetDepthBounds(
        float                                       maxDepthBounds)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
 
-       cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
-       cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+       if (state->dynamic.depth_bounds.min == minDepthBounds &&
+           state->dynamic.depth_bounds.max == maxDepthBounds) {
+               return;
+       }
 
-       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+       state->dynamic.depth_bounds.min = minDepthBounds;
+       state->dynamic.depth_bounds.max = maxDepthBounds;
+
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
 }
 
 void radv_CmdSetStencilCompareMask(
@@ -2810,13 +3388,21 @@ void radv_CmdSetStencilCompareMask(
        uint32_t                                    compareMask)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       bool front_same = state->dynamic.stencil_compare_mask.front == compareMask;
+       bool back_same = state->dynamic.stencil_compare_mask.back == compareMask;
+
+       if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
+           (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
+               return;
+       }
 
        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-               cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+               state->dynamic.stencil_compare_mask.front = compareMask;
        if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-               cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+               state->dynamic.stencil_compare_mask.back = compareMask;
 
-       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
 }
 
 void radv_CmdSetStencilWriteMask(
@@ -2825,13 +3411,21 @@ void radv_CmdSetStencilWriteMask(
        uint32_t                                    writeMask)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       bool front_same = state->dynamic.stencil_write_mask.front == writeMask;
+       bool back_same = state->dynamic.stencil_write_mask.back == writeMask;
+
+       if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
+           (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
+               return;
+       }
 
        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-               cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+               state->dynamic.stencil_write_mask.front = writeMask;
        if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-               cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+               state->dynamic.stencil_write_mask.back = writeMask;
 
-       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
 }
 
 void radv_CmdSetStencilReference(
@@ -2840,6 +3434,14 @@ void radv_CmdSetStencilReference(
        uint32_t                                    reference)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       bool front_same = state->dynamic.stencil_reference.front == reference;
+       bool back_same = state->dynamic.stencil_reference.back == reference;
+
+       if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
+           (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) {
+               return;
+       }
 
        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
                cmd_buffer->state.dynamic.stencil_reference.front = reference;
@@ -2862,12 +3464,36 @@ void radv_CmdSetDiscardRectangleEXT(
        assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
        assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
 
+       if (!memcmp(state->dynamic.discard_rectangle.rectangles + firstDiscardRectangle,
+                   pDiscardRectangles, discardRectangleCount * sizeof(*pDiscardRectangles))) {
+               return;
+       }
+
        typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
                     pDiscardRectangles, discardRectangleCount);
 
        state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
 }
 
+void radv_CmdSetSampleLocationsEXT(
+       VkCommandBuffer                             commandBuffer,
+       const VkSampleLocationsInfoEXT*             pSampleLocationsInfo)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_cmd_state *state = &cmd_buffer->state;
+
+       assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
+
+       state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
+       state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
+       state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
+       typed_memcpy(&state->dynamic.sample_location.locations[0],
+                    pSampleLocationsInfo->pSampleLocations,
+                    pSampleLocationsInfo->sampleLocationsCount);
+
+       state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
+}
+
 void radv_CmdExecuteCommands(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    commandBufferCount,
@@ -3027,7 +3653,7 @@ VkResult radv_ResetCommandPool(
 void radv_TrimCommandPool(
     VkDevice                                    device,
     VkCommandPool                               commandPool,
-    VkCommandPoolTrimFlagsKHR                   flags)
+    VkCommandPoolTrimFlags                      flags)
 {
        RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
 
@@ -3040,6 +3666,69 @@ void radv_TrimCommandPool(
        }
 }
 
+static uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+       /* The id of this subpass shouldn't exceed the number of subpasses in
+        * this render pass minus 1.
+        */
+       assert(subpass_id < state->pass->subpass_count);
+       return subpass_id;
+}
+
+static void
+radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
+                             uint32_t subpass_id)
+{
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
+
+       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+                                                          cmd_buffer->cs, 4096);
+
+       radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
+
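+       /* Transition the attachments used by this subpass to the layouts
+        * they expect before emitting the subpass clears.
+        */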
+       for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+               const uint32_t a = subpass->attachments[i].attachment;
+               if (a == VK_ATTACHMENT_UNUSED)
+                       continue;
+
+               radv_handle_subpass_image_transition(cmd_buffer,
+                                                    subpass->attachments[i]);
+       }
+
+       radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+       radv_cmd_buffer_clear_subpass(cmd_buffer);
+
+       assert(cmd_buffer->cs->cdw <= cdw_max);
+}
+
+static void
+radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
+{
+       struct radv_cmd_state *state = &cmd_buffer->state;
+       const struct radv_subpass *subpass = state->subpass;
+       uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+
+       radv_cmd_buffer_resolve_subpass(cmd_buffer);
+
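+       /* Transition the attachments that are used for the last time in this
+        * subpass to their final layouts.
+        */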
+       for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+               const uint32_t a = subpass->attachments[i].attachment;
+               if (a == VK_ATTACHMENT_UNUSED)
+                       continue;
+
+               if (state->pass->attachments[a].last_subpass_idx != subpass_id)
+                       continue;
+
+               VkImageLayout layout = state->pass->attachments[a].final_layout;
+               radv_handle_subpass_image_transition(cmd_buffer,
+                                     (struct radv_subpass_attachment){a, layout});
+       }
+}
+
 void radv_CmdBeginRenderPass(
        VkCommandBuffer                             commandBuffer,
        const VkRenderPassBeginInfo*                pRenderPassBegin,
@@ -3048,10 +3737,7 @@ void radv_CmdBeginRenderPass(
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
        RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
-
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
-                                                          cmd_buffer->cs, 2048);
-       MAYBE_UNUSED VkResult result;
+       VkResult result;
 
        cmd_buffer->state.framebuffer = framebuffer;
        cmd_buffer->state.pass = pass;
@@ -3061,10 +3747,7 @@ void radv_CmdBeginRenderPass(
        if (result != VK_SUCCESS)
                return;
 
-       radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
-       assert(cmd_buffer->cs->cdw <= cdw_max);
-
-       radv_cmd_buffer_clear_subpass(cmd_buffer);
+       radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
 }
 
 void radv_CmdBeginRenderPass2KHR(
@@ -3082,13 +3765,9 @@ void radv_CmdNextSubpass(
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
 
-       radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
-       radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
-                                             2048);
-
-       radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
-       radv_cmd_buffer_clear_subpass(cmd_buffer);
+       uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
+       radv_cmd_buffer_end_subpass(cmd_buffer);
+       radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
 }
 
 void radv_CmdNextSubpass2KHR(
@@ -3124,12 +3803,13 @@ static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned in
 
 static void
 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
-                         uint32_t vertex_count)
+                         uint32_t vertex_count,
+                        bool use_opaque)
 {
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
        radeon_emit(cmd_buffer->cs, vertex_count);
        radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
-                                   S_0287F0_USE_OPAQUE(0));
+                                   S_0287F0_USE_OPAQUE(use_opaque));
 }
 
 static void
@@ -3190,51 +3870,6 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
        }
 }
 
-struct radv_draw_info {
-       /**
-        * Number of vertices.
-        */
-       uint32_t count;
-
-       /**
-        * Index of the first vertex.
-        */
-       int32_t vertex_offset;
-
-       /**
-        * First instance id.
-        */
-       uint32_t first_instance;
-
-       /**
-        * Number of instances.
-        */
-       uint32_t instance_count;
-
-       /**
-        * First index (indexed draws only).
-        */
-       uint32_t first_index;
-
-       /**
-        * Whether it's an indexed draw.
-        */
-       bool indexed;
-
-       /**
-        * Indirect draw parameters resource.
-        */
-       struct radv_buffer *indirect;
-       uint64_t indirect_offset;
-       uint32_t stride;
-
-       /**
-        * Draw count parameters resource.
-        */
-       struct radv_buffer *count_buffer;
-       uint64_t count_buffer_offset;
-};
-
 static void
 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
                       const struct radv_draw_info *info)
@@ -3327,14 +3962,17 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
                        }
                } else {
                        if (!state->subpass->view_mask) {
-                               radv_cs_emit_draw_packet(cmd_buffer, info->count);
+                               radv_cs_emit_draw_packet(cmd_buffer,
+                                                        info->count,
+                                                        !!info->strmout_buffer);
                        } else {
                                unsigned i;
                                for_each_bit(i, state->subpass->view_mask) {
                                        radv_emit_view_index(cmd_buffer, i);
 
                                        radv_cs_emit_draw_packet(cmd_buffer,
-                                                                info->count);
+                                                                info->count,
+                                                                !!info->strmout_buffer);
                                }
                        }
                }
@@ -3358,26 +3996,30 @@ radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
  * any context registers.
  */
 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
-                                            bool indexed_draw)
+                                            const struct radv_draw_info *info)
 {
        struct radv_cmd_state *state = &cmd_buffer->state;
 
        if (!cmd_buffer->device->physical_device->has_scissor_bug)
                return false;
 
+       if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
+               return true;
+
        uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
 
-       /* Index & Vertex buffer don't change context regs, and pipeline is handled later. */
-       used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);
+       /* Index, vertex and streamout buffers don't change context regs, and
+        * pipeline is already handled.
+        */
+       used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
+                        RADV_CMD_DIRTY_VERTEX_BUFFER |
+                        RADV_CMD_DIRTY_STREAMOUT_BUFFER |
+                        RADV_CMD_DIRTY_PIPELINE);
 
-       /* Assume all state changes except  these two can imply context rolls. */
        if (cmd_buffer->state.dirty & used_states)
                return true;
 
-       if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
-               return true;
-
-       if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
+       if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
            (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
                return true;
 
@@ -3388,7 +4030,7 @@ static void
 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
                              const struct radv_draw_info *info)
 {
-       bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);
+       bool late_scissor_emission;
 
        if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
            cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
@@ -3397,6 +4039,12 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
        if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
                radv_emit_graphics_pipeline(cmd_buffer);
 
+       /* This must be done before cmd_buffer->state.dirty is cleared
+        * (excluding RADV_CMD_DIRTY_PIPELINE) and after
+        * cmd_buffer->state.context_roll_without_scissor_emitted is set.
+        */
+       late_scissor_emission =
+               radv_need_late_scissor_emission(cmd_buffer, info);
+
        if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
                radv_emit_framebuffer_state(cmd_buffer);
 
@@ -3404,11 +4052,11 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
                if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
                        radv_emit_index_buffer(cmd_buffer);
        } else {
-               /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
+               /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
                 * so the state must be re-emitted before the next indexed
                 * draw.
                 */
-               if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
                        cmd_buffer->state.last_index_type = -1;
                        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
                }
@@ -3416,9 +4064,7 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
 
        radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
 
-       radv_emit_draw_registers(cmd_buffer, info->indexed,
-                                info->instance_count > 1, info->indirect,
-                                info->indirect ? 0 : info->count);
+       radv_emit_draw_registers(cmd_buffer, info);
 
        if (late_scissor_emission)
                radv_emit_scissor(cmd_buffer);
@@ -3428,8 +4074,10 @@ static void
 radv_draw(struct radv_cmd_buffer *cmd_buffer,
          const struct radv_draw_info *info)
 {
+       struct radeon_info *rad_info =
+               &cmd_buffer->device->physical_device->rad_info;
        bool has_prefetch =
-               cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+               cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
        bool pipeline_is_dirty =
                (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
                cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
@@ -3438,6 +4086,19 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer,
                radeon_check_space(cmd_buffer->device->ws,
                                   cmd_buffer->cs, 4096);
 
+       if (likely(!info->indirect)) {
+               /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
+                * no workaround for indirect draws, but we can at least skip
+                * direct draws.
+                */
+               if (unlikely(!info->instance_count))
+                       return;
+
+               /* Skip direct draws with count == 0, unless the vertex count
+                * comes from a streamout counter buffer.
+                */
+               if (unlikely(!info->count && !info->strmout_buffer))
+                       return;
+       }
+
        /* Use optimal packet order based on whether we need to sync the
         * pipeline.
         */
@@ -3497,6 +4158,16 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer,
                }
        }
 
+       /* Workaround for a VGT hang when streamout is enabled.
+        * It must be done after drawing.
+        */
+       if (cmd_buffer->state.streamout.streamout_enabled &&
+           (rad_info->family == CHIP_HAWAII ||
+            rad_info->family == CHIP_TONGA ||
+            rad_info->family == CHIP_FIJI)) {
+               cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
+       }
+
        assert(cmd_buffer->cs->cdw <= cdw_max);
        radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
 }
@@ -3579,55 +4250,6 @@ void radv_CmdDrawIndexedIndirect(
        radv_draw(cmd_buffer, &info);
 }
 
-void radv_CmdDrawIndirectCountAMD(
-       VkCommandBuffer                             commandBuffer,
-       VkBuffer                                    _buffer,
-       VkDeviceSize                                offset,
-       VkBuffer                                    _countBuffer,
-       VkDeviceSize                                countBufferOffset,
-       uint32_t                                    maxDrawCount,
-       uint32_t                                    stride)
-{
-       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-       RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
-       RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
-       struct radv_draw_info info = {};
-
-       info.count = maxDrawCount;
-       info.indirect = buffer;
-       info.indirect_offset = offset;
-       info.count_buffer = count_buffer;
-       info.count_buffer_offset = countBufferOffset;
-       info.stride = stride;
-
-       radv_draw(cmd_buffer, &info);
-}
-
-void radv_CmdDrawIndexedIndirectCountAMD(
-       VkCommandBuffer                             commandBuffer,
-       VkBuffer                                    _buffer,
-       VkDeviceSize                                offset,
-       VkBuffer                                    _countBuffer,
-       VkDeviceSize                                countBufferOffset,
-       uint32_t                                    maxDrawCount,
-       uint32_t                                    stride)
-{
-       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-       RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
-       RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
-       struct radv_draw_info info = {};
-
-       info.indexed = true;
-       info.count = maxDrawCount;
-       info.indirect = buffer;
-       info.indirect_offset = offset;
-       info.count_buffer = count_buffer;
-       info.count_buffer_offset = countBufferOffset;
-       info.stride = stride;
-
-       radv_draw(cmd_buffer, &info);
-}
-
 void radv_CmdDrawIndirectCountKHR(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    _buffer,
@@ -3727,7 +4349,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
                if (loc->sgpr_idx != -1) {
                        for (unsigned i = 0; i < 3; ++i) {
                                radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
-                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
                                                COPY_DATA_DST_SEL(COPY_DATA_REG));
                                radeon_emit(cs, (va +  4 * i));
                                radeon_emit(cs, (va + 4 * i) >> 32);
@@ -3797,7 +4419,6 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
                }
 
                if (loc->sgpr_idx != -1) {
-                       assert(!loc->indirect);
                        assert(loc->num_sgprs == 3);
 
                        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
@@ -3844,7 +4465,7 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
 {
        struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
        bool has_prefetch =
-               cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+               cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
        bool pipeline_is_dirty = pipeline &&
                                 pipeline != cmd_buffer->state.emitted_compute_pipeline;
 
@@ -3965,13 +4586,7 @@ void radv_CmdEndRenderPass(
 
        radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
 
-       radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
-       for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
-               VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
-               radv_handle_subpass_image_transition(cmd_buffer,
-                                     (struct radv_subpass_attachment){i, layout});
-       }
+       radv_cmd_buffer_end_subpass(cmd_buffer);
 
        vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
 
@@ -4002,19 +4617,14 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
 {
        assert(range->baseMipLevel == 0);
        assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS);
-       unsigned layer_count = radv_get_layerCount(image, range);
-       uint64_t size = image->surface.htile_slice_size * layer_count;
        VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
-       uint64_t offset = image->offset + image->htile_offset +
-                         image->surface.htile_slice_size * range->baseArrayLayer;
        struct radv_cmd_state *state = &cmd_buffer->state;
        VkClearDepthStencilValue value = {};
 
        state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                             RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
 
-       state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
-                                             size, clear_word);
+       state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
 
        state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
 
@@ -4022,6 +4632,15 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
                aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
 
        radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
+
+       if (radv_image_is_tc_compat_htile(image)) {
+               /* Initialize the TC-compat metada value to 0 because by
+               /* Initialize the TC-compat metadata value to 0 because by
+                * default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
+                * have to conditionally update its value when performing a
+                * fast depth clear.
+               radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
+       }
 }
 
 static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
@@ -4030,16 +4649,20 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe
                                               VkImageLayout dst_layout,
                                               unsigned src_queue_mask,
                                               unsigned dst_queue_mask,
-                                              const VkImageSubresourceRange *range,
-                                              VkImageAspectFlags pending_clears)
+                                              const VkImageSubresourceRange *range)
 {
        if (!radv_image_has_htile(image))
                return;
 
-       if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
-                  radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
-               /* TODO: merge with the clear if applicable */
-               radv_initialize_htile(cmd_buffer, image, range, 0);
+       if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+               uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
+
+               if (radv_layout_is_htile_compressed(image, dst_layout,
+                                                   dst_queue_mask)) {
+                       clear_value = 0;
+               }
+
+               radv_initialize_htile(cmd_buffer, image, range, clear_value);
        } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
                   radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
                uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
@@ -4074,6 +4697,27 @@ static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
        state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
 }
 
+void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
+                          struct radv_image *image)
+{
+       struct radv_cmd_state *state = &cmd_buffer->state;
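+       /* Default FMASK values, indexed by log2(sample count), that describe
+        * the fully expanded state where every sample maps to its own color
+        * fragment.
+        */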
+       static const uint32_t fmask_clear_values[4] = {
+               0x00000000,
+               0x02020202,
+               0xE4E4E4E4,
+               0x76543210
+       };
+       uint32_t log2_samples = util_logbase2(image->info.samples);
+       uint32_t value = fmask_clear_values[log2_samples];
+
+       state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+                            RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+
+       state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value);
+
+       state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+}
+
 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
                         struct radv_image *image, uint32_t value)
 {
@@ -4109,17 +4753,24 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
                radv_initialise_cmask(cmd_buffer, image, value);
        }
 
+       if (radv_image_has_fmask(image)) {
+               radv_initialize_fmask(cmd_buffer, image);
+       }
+
        if (radv_image_has_dcc(image)) {
                uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+               bool need_decompress_pass = false;
 
                if (radv_layout_dcc_compressed(image, dst_layout,
                                               dst_queue_mask)) {
                        value = 0x20202020u;
+                       need_decompress_pass = true;
                }
 
                radv_initialize_dcc(cmd_buffer, image, value);
 
-               radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image, false);
+               radv_update_fce_metadata(cmd_buffer, image,
+                                        need_decompress_pass);
        }
 
        if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
@@ -4157,10 +4808,28 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe
                        radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
                }
        } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
+               bool fce_eliminate = false, fmask_expand = false;
+
                if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
                    !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
-                       radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+                       fce_eliminate = true;
                }
+
+               if (radv_image_has_fmask(image)) {
+                       if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
+                           dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
+                               /* An FMASK decompress is required before doing
+                                * an MSAA decompress using FMASK.
+                                */
+                               fmask_expand = true;
+                       }
+               }
+
+               if (fce_eliminate || fmask_expand)
+                       radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+
+               if (fmask_expand)
+                       radv_expand_fmask_image_inplace(cmd_buffer, image, range);
        }
 }
 
@@ -4170,8 +4839,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                         VkImageLayout dst_layout,
                                         uint32_t src_family,
                                         uint32_t dst_family,
-                                        const VkImageSubresourceRange *range,
-                                        VkImageAspectFlags pending_clears)
+                                        const VkImageSubresourceRange *range)
 {
        if (image->exclusive && src_family != dst_family) {
                /* This is an acquire or a release operation and there will be
@@ -4190,6 +4858,9 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                        return;
        }
 
+       if (src_layout == dst_layout)
+               return;
+
        unsigned src_queue_mask =
                radv_image_queue_family_mask(image, src_family,
                                             cmd_buffer->queue_family_index);
@@ -4201,7 +4872,7 @@ static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                radv_handle_depth_image_transition(cmd_buffer, image,
                                                   src_layout, dst_layout,
                                                   src_queue_mask, dst_queue_mask,
-                                                  range, pending_clears);
+                                                  range);
        } else {
                radv_handle_color_image_transition(cmd_buffer, image,
                                                   src_layout, dst_layout,
@@ -4214,6 +4885,7 @@ struct radv_barrier_info {
        uint32_t eventCount;
        const VkEvent *pEvents;
        VkPipelineStageFlags srcStageMask;
+       VkPipelineStageFlags dstStageMask;
 };
 
 static void
@@ -4238,7 +4910,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,
 
                MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
 
-               si_emit_wait_fence(cs, va, 1, 0xffffffff);
+               radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
                assert(cmd_buffer->cs->cdw <= cdw_max);
        }
 
@@ -4265,7 +4937,19 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,
                                                        image);
        }
 
-       radv_stage_flush(cmd_buffer, info->srcStageMask);
+       /* The Vulkan spec 1.1.98 says:
+        *
+        * "An execution dependency with only
+        *  VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
+        *  will only prevent that stage from executing in subsequently
+        *  submitted commands. As this stage does not perform any actual
+        *  execution, this is not observable - in effect, it does not delay
+        *  processing of subsequent commands. Similarly an execution dependency
+        *  with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
+        *  will effectively not wait for any prior commands to complete."
+        */
+       if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
+               radv_stage_flush(cmd_buffer, info->srcStageMask);
        cmd_buffer->state.flush_bits |= src_flush_bits;
 
        for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
@@ -4275,14 +4959,15 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,
                                             pImageMemoryBarriers[i].newLayout,
                                             pImageMemoryBarriers[i].srcQueueFamilyIndex,
                                             pImageMemoryBarriers[i].dstQueueFamilyIndex,
-                                            &pImageMemoryBarriers[i].subresourceRange,
-                                            0);
+                                            &pImageMemoryBarriers[i].subresourceRange);
        }
 
        /* Make sure CP DMA is idle because the driver might have performed a
         * DMA operation for copying or filling buffers/images.
         */
-       si_cp_dma_wait_for_idle(cmd_buffer);
+       if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
+                                 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
+               si_cp_dma_wait_for_idle(cmd_buffer);
 
        cmd_buffer->state.flush_bits |= dst_flush_bits;
 }
@@ -4305,6 +4990,7 @@ void radv_CmdPipelineBarrier(
        info.eventCount = 0;
        info.pEvents = NULL;
        info.srcStageMask = srcStageMask;
+       info.dstStageMask = destStageMask;
 
        radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
                     bufferMemoryBarrierCount, pBufferMemoryBarriers,
@@ -4320,9 +5006,11 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
        struct radeon_cmdbuf *cs = cmd_buffer->cs;
        uint64_t va = radv_buffer_get_va(event->bo);
 
+       si_emit_cache_flush(cmd_buffer);
+
        radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
 
-       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
+       MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
 
        /* Flags that only require a top-of-pipe event. */
        VkPipelineStageFlags top_of_pipe_flags =
@@ -4337,14 +5025,16 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
        /* Make sure CP DMA is idle because the driver might have performed a
         * DMA operation for copying or filling buffers/images.
         */
-       si_cp_dma_wait_for_idle(cmd_buffer);
+       if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
+                        VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
+               si_cp_dma_wait_for_idle(cmd_buffer);
 
        /* TODO: Emit EOS events for syncing PS/CS stages. */
 
        if (!(stageMask & ~top_of_pipe_flags)) {
                /* Just need to sync the PFP engine. */
                radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
-               radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+               radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
                                S_370_WR_CONFIRM(1) |
                                S_370_ENGINE_SEL(V_370_PFP));
                radeon_emit(cs, va);
@@ -4353,7 +5043,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
        } else if (!(stageMask & ~post_index_fetch_flags)) {
                /* Sync ME because PFP reads index and indirect buffers. */
                radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
-               radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+               radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
                                S_370_WR_CONFIRM(1) |
                                S_370_ENGINE_SEL(V_370_ME));
                radeon_emit(cs, va);
@@ -4365,7 +5055,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
                                           cmd_buffer->device->physical_device->rad_info.chip_class,
                                           radv_cmd_buffer_uses_mec(cmd_buffer),
                                           V_028A90_BOTTOM_OF_PIPE_TS, 0,
-                                          EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
+                                          EOP_DATA_SEL_VALUE_32BIT, va, value,
                                           cmd_buffer->gfx9_eop_bug_va);
        }
 
@@ -4424,29 +5114,80 @@ void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
 }
 
 /* VK_EXT_conditional_rendering */
-void vkCmdBeginConditionalRenderingEXT(
+void radv_CmdBeginConditionalRenderingEXT(
        VkCommandBuffer                             commandBuffer,
        const VkConditionalRenderingBeginInfoEXT*   pConditionalRenderingBegin)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
-       bool inverted;
-       uint64_t va;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       bool draw_visible = true;
+       uint64_t pred_value = 0;
+       uint64_t va, new_va;
+       unsigned pred_offset;
 
        va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
 
-       inverted = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
+       /* By default, if the 32-bit value at offset in buffer memory is zero,
+        * then the rendering commands are discarded, otherwise they are
+        * executed as normal. If the inverted flag is set, all commands are
+        * discarded if the value is non-zero.
+        */
+       if (pConditionalRenderingBegin->flags &
+           VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
+               draw_visible = false;
+       }
+
+       si_emit_cache_flush(cmd_buffer);
+
+       /* From the Vulkan spec 1.1.107:
+        *
+        * "If the 32-bit value at offset in buffer memory is zero, then the
+        *  rendering commands are discarded, otherwise they are executed as
+        *  normal. If the value of the predicate in buffer memory changes while
+        *  conditional rendering is active, the rendering commands may be
+        *  discarded in an implementation-dependent way. Some implementations
+        *  may latch the value of the predicate upon beginning conditional
+        *  rendering while others may read it before every rendering command."
+        *
+        * However, the AMD hardware treats the predicate as a 64-bit value,
+        * which means we need a workaround in the driver. Luckily, it's not
+        * required to support the value changing while predication is active.
+        *
+        * The workaround is as follows:
+        * 1) allocate a 64-bit value in the upload BO and initialize it to 0
+        * 2) copy the 32-bit predicate value to the upload BO
+        * 3) use the newly allocated VA for predication
+        *
+        * Based on the conditionalrender demo, it's faster to do the COPY_DATA
+        * in ME (+ sync PFP) instead of in PFP.
+        */
+       radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
+
+       new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
+
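+       /* Copy the application's 32-bit predicate into the low dword; the high
+        * dword keeps the zero written by the upload above.
+        */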
+       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+                       COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
+                       COPY_DATA_WR_CONFIRM);
+       radeon_emit(cs, va);
+       radeon_emit(cs, va >> 32);
+       radeon_emit(cs, new_va);
+       radeon_emit(cs, new_va >> 32);
+
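+       /* Stall the PFP until the ME has performed the copy above, so that
+        * predication uses the updated value.
+        */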
+       radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+       radeon_emit(cs, 0);
 
        /* Enable predication for this command buffer. */
-       si_emit_set_predication_state(cmd_buffer, inverted, va);
+       si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
        cmd_buffer->state.predicating = true;
 
        /* Store conditional rendering user info. */
-       cmd_buffer->state.predication_type = inverted;
-       cmd_buffer->state.predication_va = va;
+       cmd_buffer->state.predication_type = draw_visible;
+       cmd_buffer->state.predication_va = new_va;
 }
 
-void vkCmdEndConditionalRenderingEXT(
+void radv_CmdEndConditionalRenderingEXT(
        VkCommandBuffer                             commandBuffer)
 {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
@@ -4459,3 +5200,239 @@ void vkCmdEndConditionalRenderingEXT(
        cmd_buffer->state.predication_type = -1;
        cmd_buffer->state.predication_va = 0;
 }
+
+/* VK_EXT_transform_feedback */
+void radv_CmdBindTransformFeedbackBuffersEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstBinding,
+    uint32_t                                    bindingCount,
+    const VkBuffer*                             pBuffers,
+    const VkDeviceSize*                         pOffsets,
+    const VkDeviceSize*                         pSizes)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+       uint8_t enabled_mask = 0;
+
+       assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
+       for (uint32_t i = 0; i < bindingCount; i++) {
+               uint32_t idx = firstBinding + i;
+
+               sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+               sb[idx].offset = pOffsets[i];
+               sb[idx].size = pSizes[i];
+
+               radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+                                  sb[idx].buffer->bo);
+
+               enabled_mask |= 1 << idx;
+       }
+
+       cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
+
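+       /* The streamout buffer descriptors are uploaded later, when the dirty
+        * flag is processed before the next draw.
+        */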
+       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
+}
+
+static void
+radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
+{
+       struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+
+       radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+       radeon_emit(cs,
+                   S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
+                   S_028B94_RAST_STREAM(0) |
+                   S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
+                   S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
+                   S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
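+       /* VGT_STRMOUT_BUFFER_CONFIG: only enable the buffers that are both
+        * bound and written by the current streamout shader.
+        */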
+       radeon_emit(cs, so->hw_enabled_mask &
+                       so->enabled_stream_buffers_mask);
+
+       cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
+
+static void
+radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
+{
+       struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+       bool old_streamout_enabled = so->streamout_enabled;
+       uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
+
+       so->streamout_enabled = enable;
+
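+       /* VGT_STRMOUT_BUFFER_CONFIG has one 4-bit buffer-enable field per
+        * stream, so replicate the enabled-buffer mask for all four streams.
+        */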
+       so->hw_enabled_mask = so->enabled_mask |
+                             (so->enabled_mask << 4) |
+                             (so->enabled_mask << 8) |
+                             (so->enabled_mask << 12);
+
+       if ((old_streamout_enabled != so->streamout_enabled) ||
+           (old_hw_enabled_mask != so->hw_enabled_mask))
+               radv_emit_streamout_enable(cmd_buffer);
+}
+
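+/* Wait for streamout to idle by emitting a SO_VGTSTREAMOUT_FLUSH event and
+ * polling CP_STRMOUT_CNTL until the offset update is done.
+ */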
+static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
+{
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       unsigned reg_strmout_cntl;
+
+       /* The register is at different places on different ASICs. */
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
+               reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
+               radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
+       } else {
+               reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
+               radeon_set_config_reg(cs, reg_strmout_cntl, 0);
+       }
+
+       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
+
+       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+       radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
+       radeon_emit(cs, reg_strmout_cntl >> 2);  /* register */
+       radeon_emit(cs, 0);
+       radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
+       radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
+       radeon_emit(cs, 4); /* poll interval */
+}
+
+void radv_CmdBeginTransformFeedbackEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstCounterBuffer,
+    uint32_t                                    counterBufferCount,
+    const VkBuffer*                             pCounterBuffers,
+    const VkDeviceSize*                         pCounterBufferOffsets)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+       struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       uint32_t i;
+
+       radv_flush_vgt_streamout(cmd_buffer);
+
+       assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+       for_each_bit(i, so->enabled_mask) {
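+               /* Map this binding to its counter buffer slot; bindings outside
+                * the [firstCounterBuffer, firstCounterBuffer + counterBufferCount)
+                * range have no counter buffer.
+                */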
+               int32_t counter_buffer_idx = i - firstCounterBuffer;
+               if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+                       counter_buffer_idx = -1;
+
+               /* AMD GCN binds streamout buffers as shader resources.
+                * VGT only counts primitives and tells the shader through
+                * SGPRs what to do.
+                */
+               radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
+               radeon_emit(cs, sb[i].size >> 2);     /* BUFFER_SIZE (in DW) */
+               radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
+
+               cmd_buffer->state.context_roll_without_scissor_emitted = true;
+
+               if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+                       /* The array of counter buffers is optional. */
+                       RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+                       uint64_t va = radv_buffer_get_va(buffer->bo);
+
+                       va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
+                       /* Append */
+                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                                       STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+                                       STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, va); /* src address lo */
+                       radeon_emit(cs, va >> 32); /* src address hi */
+
+                       radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+               } else {
+                       /* Start from the beginning. */
+                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                                       STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+                                       STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+               }
+       }
+
+       radv_set_streamout_enable(cmd_buffer, true);
+}
+
+void radv_CmdEndTransformFeedbackEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstCounterBuffer,
+    uint32_t                                    counterBufferCount,
+    const VkBuffer*                             pCounterBuffers,
+    const VkDeviceSize*                         pCounterBufferOffsets)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+       struct radeon_cmdbuf *cs = cmd_buffer->cs;
+       uint32_t i;
+
+       radv_flush_vgt_streamout(cmd_buffer);
+
+       assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+       for_each_bit(i, so->enabled_mask) {
+               int32_t counter_buffer_idx = i - firstCounterBuffer;
+               if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+                       counter_buffer_idx = -1;
+
+               if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+                       /* The array of counter buffers is optional. */
+                       RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+                       uint64_t va = radv_buffer_get_va(buffer->bo);
+
+                       va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
+                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                                       STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+                                       STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
+                                       STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
+                       radeon_emit(cs, va);            /* dst address lo */
+                       radeon_emit(cs, va >> 32);      /* dst address hi */
+                       radeon_emit(cs, 0);             /* unused */
+                       radeon_emit(cs, 0);             /* unused */
+
+                       radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+               }
+
+               /* Deactivate transform feedback by zeroing the buffer size.
+                * The counters (primitives generated, primitives emitted) may
+                * be enabled even if there is no buffer bound. This ensures
+                * that the primitives-emitted query won't increment.
+                */
+               radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+
+               cmd_buffer->state.context_roll_without_scissor_emitted = true;
+       }
+
+       radv_set_streamout_enable(cmd_buffer, false);
+}
+
+void radv_CmdDrawIndirectByteCountEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    instanceCount,
+    uint32_t                                    firstInstance,
+    VkBuffer                                    _counterBuffer,
+    VkDeviceSize                                counterBufferOffset,
+    uint32_t                                    counterOffset,
+    uint32_t                                    vertexStride)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
+       struct radv_draw_info info = {};
+
+       info.instance_count = instanceCount;
+       info.first_instance = firstInstance;
+       info.strmout_buffer = counterBuffer;
+       info.strmout_buffer_offset = counterBufferOffset;
+       info.stride = vertexStride;
+
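+       /* The vertex count is not specified here; the GPU derives it from the
+        * byte count stored in the counter buffer and vertexStride.
+        */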
+       radv_draw(cmd_buffer, &info);
+}