radeonsi: set descriptor dirty mask on shader buffer unbind
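
With descriptor uploads now driven by a per-slot dirty mask, unbinding a
shader buffer only wrote the null descriptor into the list; the slot's
bit in dirty_mask was never set, so si_upload_descriptors() could skip
the slot and leave the stale descriptor in use. Mark the slot dirty on
unbind, matching what the bind path already does.
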
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index e9458ece0bc65241dde4afa5dfb82e1328a13469..f48ddb77aabe672ec38796983be4c61c6bc120a0 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -60,6 +60,8 @@
 #include "si_shader.h"
 #include "sid.h"
 
+#include "util/u_format.h"
+#include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/u_suballoc.h"
 #include "util/u_upload_mgr.h"
@@ -109,7 +111,7 @@ static void si_init_descriptors(struct si_descriptors *desc,
        desc->list = CALLOC(num_elements, element_dw_size * 4);
        desc->element_dw_size = element_dw_size;
        desc->num_elements = num_elements;
-       desc->list_dirty = true; /* upload the list before the next draw */
+       desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
        desc->shader_userdata_offset = shader_userdata_index * 4;
 
        if (ce_offset) {
@@ -138,7 +140,7 @@ static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
                         unsigned *out_offset, struct r600_resource **out_buf) {
        uint64_t va;
 
-       u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
+       u_suballocator_alloc(sctx->ce_suballocator, size, 64, out_offset,
                             (struct pipe_resource**)out_buf);
        if (!out_buf)
                        return false;
@@ -151,34 +153,99 @@ static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
        radeon_emit(sctx->ce_ib, va);
        radeon_emit(sctx->ce_ib, va >> 32);
 
+       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
+                              RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
+
        sctx->ce_need_synchronization = true;
        return true;
 }
 
+static void si_reinitialize_ce_ram(struct si_context *sctx,
+                            struct si_descriptors *desc)
+{
+       if (desc->buffer) {
+               struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
+               unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
+               uint64_t va = buffer->gpu_address + desc->buffer_offset;
+               struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
+
+               if (!ib)
+                       ib = sctx->ce_ib;
+
+               list_size = align(list_size, 32);
+
+               radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
+               radeon_emit(ib, va);
+               radeon_emit(ib, va >> 32);
+               radeon_emit(ib, list_size / 4);
+               radeon_emit(ib, desc->ce_offset);
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+                                   RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+       }
+       desc->ce_ram_dirty = false;
+}
+
+void si_ce_enable_loads(struct radeon_winsys_cs *ib)
+{
+       radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
+       radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
+                       CONTEXT_CONTROL_LOAD_CE_RAM(1));
+       radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+}
 
 static bool si_upload_descriptors(struct si_context *sctx,
-                                 struct si_descriptors *desc)
+                                 struct si_descriptors *desc,
+                                 struct r600_atom * atom)
 {
        unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
-       void *ptr;
 
-       if (!desc->list_dirty)
+       if (!desc->dirty_mask)
                return true;
 
-       u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
-                      &desc->buffer_offset,
-                      (struct pipe_resource**)&desc->buffer, &ptr);
-       if (!desc->buffer)
-               return false; /* skip the draw call */
+       if (sctx->ce_ib) {
+               uint32_t const* list = (uint32_t const*)desc->list;
+
+               if (desc->ce_ram_dirty)
+                       si_reinitialize_ce_ram(sctx, desc);
+
+               while(desc->dirty_mask) {
+                       int begin, count;
+                       u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
+                                                    &count);
+
+                       begin *= desc->element_dw_size;
+                       count *= desc->element_dw_size;
+
+                       radeon_emit(sctx->ce_ib,
+                                   PKT3(PKT3_WRITE_CONST_RAM, count, 0));
+                       radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
+                       radeon_emit_array(sctx->ce_ib, list + begin, count);
+               }
+
+               if (!si_ce_upload(sctx, desc->ce_offset, list_size,
+                                          &desc->buffer_offset, &desc->buffer))
+                       return false;
+       } else {
+               void *ptr;
 
-       util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
+               u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
+                       &desc->buffer_offset,
+                       (struct pipe_resource**)&desc->buffer, &ptr);
+               if (!desc->buffer)
+                       return false; /* skip the draw call */
 
-       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
-                             RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+               util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
 
-       desc->list_dirty = false;
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+                                   RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+       }
        desc->pointer_dirty = true;
-       si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
+       desc->dirty_mask = 0;
+
+       if (atom)
+               si_mark_atom_dirty(sctx, atom);
+
        return true;
 }
 
@@ -188,89 +255,130 @@ static void si_release_sampler_views(struct si_sampler_views *views)
 {
        int i;
 
-       for (i = 0; i < Elements(views->views); i++) {
+       for (i = 0; i < ARRAY_SIZE(views->views); i++) {
                pipe_sampler_view_reference(&views->views[i], NULL);
        }
        si_release_descriptors(&views->desc);
 }
 
 static void si_sampler_view_add_buffer(struct si_context *sctx,
-                                      struct pipe_resource *resource)
+                                      struct pipe_resource *resource,
+                                      enum radeon_bo_usage usage)
 {
        struct r600_resource *rres = (struct r600_resource*)resource;
 
        if (!resource)
                return;
 
-       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
-                                 RADEON_USAGE_READ,
+       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
                                  r600_get_sampler_view_priority(rres));
 }
 
 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
                                          struct si_sampler_views *views)
 {
-       uint64_t mask = views->desc.enabled_mask;
+       unsigned mask = views->desc.enabled_mask;
 
        /* Add buffers to the CS. */
        while (mask) {
-               int i = u_bit_scan64(&mask);
+               int i = u_bit_scan(&mask);
 
-               si_sampler_view_add_buffer(sctx, views->views[i]->texture);
+               si_sampler_view_add_buffer(sctx, views->views[i]->texture,
+                                          RADEON_USAGE_READ);
        }
 
+       views->desc.ce_ram_dirty = true;
+
        if (!views->desc.buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
                              RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
 }
 
+void si_set_mutable_tex_desc_fields(struct r600_texture *tex,
+                                   const struct radeon_surf_level *base_level_info,
+                                   unsigned base_level, unsigned block_width,
+                                   bool is_stencil, uint32_t *state)
+{
+       uint64_t va = tex->resource.gpu_address + base_level_info->offset;
+       unsigned pitch = base_level_info->nblk_x * block_width;
+
+       state[1] &= C_008F14_BASE_ADDRESS_HI;
+       state[3] &= C_008F1C_TILING_INDEX;
+       state[4] &= C_008F20_PITCH;
+       state[6] &= C_008F28_COMPRESSION_EN;
+
+       state[0] = va >> 8;
+       state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
+       state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(tex, base_level,
+                                                            is_stencil));
+       state[4] |= S_008F20_PITCH(pitch - 1);
+
+       if (tex->dcc_offset) {
+               state[6] |= S_008F28_COMPRESSION_EN(1);
+               state[7] = (tex->resource.gpu_address +
+                           tex->dcc_offset +
+                           base_level_info->dcc_offset) >> 8;
+       }
+}
+
 static void si_set_sampler_view(struct si_context *sctx,
                                struct si_sampler_views *views,
-                               unsigned slot, struct pipe_sampler_view *view)
+                               unsigned slot, struct pipe_sampler_view *view,
+                               bool disallow_early_out)
 {
        struct si_sampler_view *rview = (struct si_sampler_view*)view;
 
-       if (view && view->texture && view->texture->target != PIPE_BUFFER &&
-           G_008F28_COMPRESSION_EN(rview->state[6]) &&
-           ((struct r600_texture*)view->texture)->dcc_offset == 0) {
-               rview->state[6] &= C_008F28_COMPRESSION_EN &
-                                  C_008F28_ALPHA_IS_ON_MSB;
-       } else if (views->views[slot] == view)
+       if (views->views[slot] == view && !disallow_early_out)
                return;
 
        if (view) {
                struct r600_texture *rtex = (struct r600_texture *)view->texture;
+               uint32_t *desc = views->desc.list + slot * 16;
 
-               si_sampler_view_add_buffer(sctx, view->texture);
+               si_sampler_view_add_buffer(sctx, view->texture,
+                                          RADEON_USAGE_READ);
 
                pipe_sampler_view_reference(&views->views[slot], view);
-               memcpy(views->desc.list + slot * 16, rview->state, 8*4);
+               memcpy(desc, rview->state, 8*4);
+
+               if (view->texture && view->texture->target != PIPE_BUFFER) {
+                       bool is_separate_stencil =
+                               rtex->is_depth && !rtex->is_flushing_texture &&
+                               rview->is_stencil_sampler;
+
+                       si_set_mutable_tex_desc_fields(rtex,
+                                                      rview->base_level_info,
+                                                      rview->base_level,
+                                                      rview->block_width,
+                                                      is_separate_stencil,
+                                                      desc);
+               }
 
                if (view->texture && view->texture->target != PIPE_BUFFER &&
                    rtex->fmask.size) {
-                       memcpy(views->desc.list + slot*16 + 8,
+                       memcpy(desc + 8,
                               rview->fmask_state, 8*4);
                } else {
                        /* Disable FMASK and bind sampler state in [12:15]. */
-                       memcpy(views->desc.list + slot*16 + 8,
+                       memcpy(desc + 8,
                               null_texture_descriptor, 4*4);
 
                        if (views->sampler_states[slot])
-                               memcpy(views->desc.list + slot*16 + 12,
+                               memcpy(desc + 12,
                                       views->sampler_states[slot], 4*4);
                }
 
-               views->desc.enabled_mask |= 1llu << slot;
+               views->desc.enabled_mask |= 1u << slot;
        } else {
                pipe_sampler_view_reference(&views->views[slot], NULL);
                memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
                /* Only clear the lower dwords of FMASK. */
                memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
-               views->desc.enabled_mask &= ~(1llu << slot);
+               views->desc.enabled_mask &= ~(1u << slot);
        }
 
-       views->desc.list_dirty = true;
+       views->desc.dirty_mask |= 1u << slot;
 }
 
 static bool is_compressed_colortex(struct r600_texture *rtex)
@@ -295,31 +403,35 @@ static void si_set_sampler_views(struct pipe_context *ctx,
                unsigned slot = start + i;
 
                if (!views || !views[i]) {
-                       samplers->depth_texture_mask &= ~(1llu << slot);
-                       samplers->compressed_colortex_mask &= ~(1llu << slot);
-                       si_set_sampler_view(sctx, &samplers->views, slot, NULL);
+                       samplers->depth_texture_mask &= ~(1u << slot);
+                       samplers->compressed_colortex_mask &= ~(1u << slot);
+                       si_set_sampler_view(sctx, &samplers->views, slot, NULL, false);
                        continue;
                }
 
-               si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
+               si_set_sampler_view(sctx, &samplers->views, slot, views[i], false);
 
                if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
                        struct r600_texture *rtex =
                                (struct r600_texture*)views[i]->texture;
 
                        if (rtex->is_depth && !rtex->is_flushing_texture) {
-                               samplers->depth_texture_mask |= 1llu << slot;
+                               samplers->depth_texture_mask |= 1u << slot;
                        } else {
-                               samplers->depth_texture_mask &= ~(1llu << slot);
+                               samplers->depth_texture_mask &= ~(1u << slot);
                        }
                        if (is_compressed_colortex(rtex)) {
-                               samplers->compressed_colortex_mask |= 1llu << slot;
+                               samplers->compressed_colortex_mask |= 1u << slot;
                        } else {
-                               samplers->compressed_colortex_mask &= ~(1llu << slot);
+                               samplers->compressed_colortex_mask &= ~(1u << slot);
                        }
+
+                       if (rtex->dcc_offset &&
+                           p_atomic_read(&rtex->framebuffers_bound))
+                               sctx->need_check_render_feedback = true;
                } else {
-                       samplers->depth_texture_mask &= ~(1llu << slot);
-                       samplers->compressed_colortex_mask &= ~(1llu << slot);
+                       samplers->depth_texture_mask &= ~(1u << slot);
+                       samplers->compressed_colortex_mask &= ~(1u << slot);
                }
        }
 }
@@ -327,19 +439,19 @@ static void si_set_sampler_views(struct pipe_context *ctx,
 static void
 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
 {
-       uint64_t mask = samplers->views.desc.enabled_mask;
+       unsigned mask = samplers->views.desc.enabled_mask;
 
        while (mask) {
-               int i = u_bit_scan64(&mask);
+               int i = u_bit_scan(&mask);
                struct pipe_resource *res = samplers->views.views[i]->texture;
 
                if (res && res->target != PIPE_BUFFER) {
                        struct r600_texture *rtex = (struct r600_texture *)res;
 
                        if (is_compressed_colortex(rtex)) {
-                               samplers->compressed_colortex_mask |= 1llu << i;
+                               samplers->compressed_colortex_mask |= 1u << i;
                        } else {
-                               samplers->compressed_colortex_mask &= ~(1llu << i);
+                               samplers->compressed_colortex_mask &= ~(1u << i);
                        }
                }
        }
@@ -373,9 +485,12 @@ si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
 
                assert(view->resource);
 
-               si_sampler_view_add_buffer(sctx, view->resource);
+               si_sampler_view_add_buffer(sctx, view->resource,
+                                          RADEON_USAGE_READWRITE);
        }
 
+       images->desc.ce_ram_dirty = true;
+
        if (images->desc.buffer) {
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                          images->desc.buffer,
@@ -387,104 +502,147 @@ si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
 static void
 si_disable_shader_image(struct si_images_info *images, unsigned slot)
 {
-       if (images->desc.enabled_mask & (1llu << slot)) {
+       if (images->desc.enabled_mask & (1u << slot)) {
                pipe_resource_reference(&images->views[slot].resource, NULL);
                images->compressed_colortex_mask &= ~(1 << slot);
 
                memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
-               images->desc.enabled_mask &= ~(1llu << slot);
-               images->desc.list_dirty = true;
+               images->desc.enabled_mask &= ~(1u << slot);
+               images->desc.dirty_mask |= 1u << slot;
        }
 }
 
 static void
-si_set_shader_images(struct pipe_context *pipe, unsigned shader,
-                    unsigned start_slot, unsigned count,
-                    struct pipe_image_view *views)
+si_mark_image_range_valid(struct pipe_image_view *view)
 {
-       struct si_context *ctx = (struct si_context *)pipe;
-       struct si_screen *screen = ctx->screen;
-       struct si_images_info *images = &ctx->images[shader];
-       unsigned i, slot;
+       struct r600_resource *res = (struct r600_resource *)view->resource;
+       const struct util_format_description *desc;
+       unsigned stride;
 
-       assert(shader < SI_NUM_SHADERS);
+       assert(res && res->b.b.target == PIPE_BUFFER);
 
-       if (!count)
+       desc = util_format_description(view->format);
+       stride = desc->block.bits / 8;
+
+       util_range_add(&res->valid_buffer_range,
+                      stride * (view->u.buf.first_element),
+                      stride * (view->u.buf.last_element + 1));
+}
+
+static void si_set_shader_image(struct si_context *ctx,
+                               struct si_images_info *images,
+                               unsigned slot, struct pipe_image_view *view)
+{
+       struct si_screen *screen = ctx->screen;
+       struct r600_resource *res;
+
+       if (!view || !view->resource) {
+               si_disable_shader_image(images, slot);
                return;
+       }
 
-       assert(start_slot + count <= SI_NUM_IMAGES);
+       res = (struct r600_resource *)view->resource;
 
-       for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
-               struct r600_resource *res;
+       if (&images->views[slot] != view)
+               util_copy_image_view(&images->views[slot], view);
 
-               if (!views || !views[i].resource) {
-                       si_disable_shader_image(images, slot);
-                       continue;
-               }
+       si_sampler_view_add_buffer(ctx, &res->b.b,
+                                  RADEON_USAGE_READWRITE);
+
+       if (res->b.b.target == PIPE_BUFFER) {
+               if (view->access & PIPE_IMAGE_ACCESS_WRITE)
+                       si_mark_image_range_valid(view);
+
+               si_make_buffer_descriptor(screen, res,
+                                         view->format,
+                                         view->u.buf.first_element,
+                                         view->u.buf.last_element,
+                                         images->desc.list + slot * 8);
+               images->compressed_colortex_mask &= ~(1 << slot);
+       } else {
+               static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
+               struct r600_texture *tex = (struct r600_texture *)res;
+               unsigned level;
+               unsigned width, height, depth;
+               uint32_t *desc = images->desc.list + slot * 8;
 
-               res = (struct r600_resource *)views[i].resource;
-               util_copy_image_view(&images->views[slot], &views[i]);
+               assert(!tex->is_depth);
+               assert(tex->fmask.size == 0);
 
-               si_sampler_view_add_buffer(ctx, &res->b.b);
+               if (tex->dcc_offset &&
+                   view->access & PIPE_IMAGE_ACCESS_WRITE)
+                       r600_texture_disable_dcc(&screen->b, tex);
 
-               if (res->b.b.target == PIPE_BUFFER) {
-                       si_make_buffer_descriptor(screen, res,
-                                                 views[i].format,
-                                                 views[i].u.buf.first_element,
-                                                 views[i].u.buf.last_element,
-                                                 images->desc.list + slot * 8);
-                       images->compressed_colortex_mask &= ~(1 << slot);
+               if (is_compressed_colortex(tex)) {
+                       images->compressed_colortex_mask |= 1 << slot;
                } else {
-                       static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
-                       struct r600_texture *tex = (struct r600_texture *)res;
-                       unsigned level;
-                       unsigned width, height, depth;
+                       images->compressed_colortex_mask &= ~(1 << slot);
+               }
 
-                       assert(!tex->is_depth);
-                       assert(tex->fmask.size == 0);
+               if (tex->dcc_offset &&
+                   p_atomic_read(&tex->framebuffers_bound))
+                       ctx->need_check_render_feedback = true;
 
-                       if (tex->dcc_offset &&
-                           views[i].access & PIPE_IMAGE_ACCESS_WRITE)
-                               r600_texture_disable_dcc(&screen->b, tex);
+               /* Always force the base level to the selected level.
+                *
+                * This is required for 3D textures, where otherwise
+                * selecting a single slice for non-layered bindings
+                * fails. It doesn't hurt the other targets.
+                */
+               level = view->u.tex.level;
+               width = u_minify(res->b.b.width0, level);
+               height = u_minify(res->b.b.height0, level);
+               depth = u_minify(res->b.b.depth0, level);
+
+               si_make_texture_descriptor(screen, tex,
+                                          false, res->b.b.target,
+                                          view->format, swizzle,
+                                          0, 0,
+                                          view->u.tex.first_layer,
+                                          view->u.tex.last_layer,
+                                          width, height, depth,
+                                          desc, NULL);
+               si_set_mutable_tex_desc_fields(tex, &tex->surface.level[level], level,
+                                              util_format_get_blockwidth(view->format),
+                                              false, desc);
+       }
 
-                       if (is_compressed_colortex(tex)) {
-                               images->compressed_colortex_mask |= 1 << slot;
-                       } else {
-                               images->compressed_colortex_mask &= ~(1 << slot);
-                       }
+       images->desc.enabled_mask |= 1u << slot;
+       images->desc.dirty_mask |= 1u << slot;
+}
 
-                       /* Always force the base level to the selected level.
-                        *
-                        * This is required for 3D textures, where otherwise
-                        * selecting a single slice for non-layered bindings
-                        * fails. It doesn't hurt the other targets.
-                        */
-                       level = views[i].u.tex.level;
-                       width = u_minify(res->b.b.width0, level);
-                       height = u_minify(res->b.b.height0, level);
-                       depth = u_minify(res->b.b.depth0, level);
-
-                       si_make_texture_descriptor(screen, tex, false, res->b.b.target,
-                                                  views[i].format, swizzle,
-                                                  level, 0, 0,
-                                                  views[i].u.tex.first_layer, views[i].u.tex.last_layer,
-                                                  width, height, depth,
-                                                  images->desc.list + slot * 8,
-                                                  NULL);
-               }
+static void
+si_set_shader_images(struct pipe_context *pipe, unsigned shader,
+                    unsigned start_slot, unsigned count,
+                    struct pipe_image_view *views)
+{
+       struct si_context *ctx = (struct si_context *)pipe;
+       struct si_images_info *images = &ctx->images[shader];
+       unsigned i, slot;
+
+       assert(shader < SI_NUM_SHADERS);
 
-               images->desc.enabled_mask |= 1llu << slot;
-               images->desc.list_dirty = true;
+       if (!count)
+               return;
+
+       assert(start_slot + count <= SI_NUM_IMAGES);
+
+       if (views) {
+               for (i = 0, slot = start_slot; i < count; ++i, ++slot)
+                       si_set_shader_image(ctx, images, slot, &views[i]);
+       } else {
+               for (i = 0, slot = start_slot; i < count; ++i, ++slot)
+                       si_set_shader_image(ctx, images, slot, NULL);
        }
 }
 
 static void
 si_images_update_compressed_colortex_mask(struct si_images_info *images)
 {
-       uint64_t mask = images->desc.enabled_mask;
+       unsigned mask = images->desc.enabled_mask;
 
        while (mask) {
-               int i = u_bit_scan64(&mask);
+               int i = u_bit_scan(&mask);
                struct pipe_resource *res = images->views[i].resource;
 
                if (res && res->target != PIPE_BUFFER) {
@@ -532,7 +690,7 @@ static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
                        continue;
 
                memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
-               desc->list_dirty = true;
+               desc->dirty_mask |= 1u << slot;
        }
 }
 
@@ -568,17 +726,19 @@ static void si_release_buffer_resources(struct si_buffer_resources *buffers)
 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
                                             struct si_buffer_resources *buffers)
 {
-       uint64_t mask = buffers->desc.enabled_mask;
+       unsigned mask = buffers->desc.enabled_mask;
 
        /* Add buffers to the CS. */
        while (mask) {
-               int i = u_bit_scan64(&mask);
+               int i = u_bit_scan(&mask);
 
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                      (struct r600_resource*)buffers->buffers[i],
                                      buffers->shader_usage, buffers->priority);
        }
 
+       buffers->desc.ce_ram_dirty = true;
+
        if (!buffers->desc.buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
@@ -597,7 +757,7 @@ static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
        for (i = 0; i < count; i++) {
                int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
 
-               if (vb >= Elements(sctx->vertex_buffer))
+               if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
                        continue;
                if (!sctx->vertex_buffer[vb].buffer)
                        continue;
@@ -649,7 +809,7 @@ static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
                unsigned offset;
                uint32_t *desc = &ptr[i*4];
 
-               if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
+               if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
                        memset(desc, 0, 16);
                        continue;
                }
@@ -707,19 +867,14 @@ void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
 
        u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
                       (struct pipe_resource**)rbuffer, &tmp);
-       if (rbuffer)
+       if (*rbuffer)
                util_memcpy_cpu_to_le32(tmp, ptr, size);
 }
 
-static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
-                                  struct pipe_constant_buffer *input)
+void si_set_constant_buffer(struct si_context *sctx,
+                           struct si_buffer_resources *buffers,
+                           uint slot, struct pipe_constant_buffer *input)
 {
-       struct si_context *sctx = (struct si_context *)ctx;
-       struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
-
-       if (shader >= SI_NUM_SHADERS)
-               return;
-
        assert(slot < buffers->desc.num_elements);
        pipe_resource_reference(&buffers->buffers[slot], NULL);
 
@@ -742,7 +897,7 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
                                               input->buffer_size, &buffer_offset);
                        if (!buffer) {
                                /* Just unbind on failure. */
-                               si_set_constant_buffer(ctx, shader, slot, NULL);
+                               si_set_constant_buffer(sctx, buffers, slot, NULL);
                                return;
                        }
                        va = r600_resource(buffer)->gpu_address + buffer_offset;
@@ -768,14 +923,26 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                      (struct r600_resource*)buffer,
                                      buffers->shader_usage, buffers->priority);
-               buffers->desc.enabled_mask |= 1llu << slot;
+               buffers->desc.enabled_mask |= 1u << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
-               buffers->desc.enabled_mask &= ~(1llu << slot);
+               buffers->desc.enabled_mask &= ~(1u << slot);
        }
 
-       buffers->desc.list_dirty = true;
+       buffers->desc.dirty_mask |= 1u << slot;
+}
+
+static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
+                                       uint shader, uint slot,
+                                       struct pipe_constant_buffer *input)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+
+       if (shader >= SI_NUM_SHADERS)
+               return;
+
+       si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
 }
 
 /* SHADER BUFFERS */
@@ -800,7 +967,8 @@ static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
                if (!sbuffer || !sbuffer->buffer) {
                        pipe_resource_reference(&buffers->buffers[slot], NULL);
                        memset(desc, 0, sizeof(uint32_t) * 4);
-                       buffers->desc.enabled_mask &= ~(1llu << slot);
+                       buffers->desc.enabled_mask &= ~(1u << slot);
+                       buffers->desc.dirty_mask |= 1u << slot;
                        continue;
                }
 
@@ -821,25 +989,21 @@ static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
                pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
                                      buffers->shader_usage, buffers->priority);
-               buffers->desc.enabled_mask |= 1llu << slot;
+               buffers->desc.enabled_mask |= 1u << slot;
+               buffers->desc.dirty_mask |= 1u << slot;
        }
-
-       buffers->desc.list_dirty = true;
 }
 
 /* RING BUFFERS */
 
-void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
+void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
                        struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records,
                        bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset)
 {
        struct si_context *sctx = (struct si_context *)ctx;
-       struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
-
-       if (shader >= SI_NUM_SHADERS)
-               return;
+       struct si_buffer_resources *buffers = &sctx->rw_buffers;
 
        /* The stride field in the resource descriptor has 14 bits */
        assert(stride < (1 << 14));
@@ -912,14 +1076,14 @@ void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                      (struct r600_resource*)buffer,
                                      buffers->shader_usage, buffers->priority);
-               buffers->desc.enabled_mask |= 1llu << slot;
+               buffers->desc.enabled_mask |= 1u << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
-               buffers->desc.enabled_mask &= ~(1llu << slot);
+               buffers->desc.enabled_mask &= ~(1u << slot);
        }
 
-       buffers->desc.list_dirty = true;
+       buffers->desc.dirty_mask |= 1u << slot;
 }
 
 /* STREAMOUT BUFFERS */
@@ -930,7 +1094,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
                                     const unsigned *offsets)
 {
        struct si_context *sctx = (struct si_context *)ctx;
-       struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
+       struct si_buffer_resources *buffers = &sctx->rw_buffers;
        unsigned old_num_targets = sctx->b.streamout.num_targets;
        unsigned i, bufidx;
 
@@ -968,7 +1132,8 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
         * start writing to the targets.
         */
        if (num_targets)
-               sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
+               sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+                                SI_CONTEXT_CS_PARTIAL_FLUSH;
 
        /* Streamout buffers must be bound in 2 places:
         * 1) in VGT by setting the VGT_STRMOUT registers
@@ -980,7 +1145,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
 
        /* Set the shader resources.*/
        for (i = 0; i < num_targets; i++) {
-               bufidx = SI_SO_BUF_OFFSET + i;
+               bufidx = SI_VS_STREAMOUT_BUF0 + i;
 
                if (targets[i]) {
                        struct pipe_resource *buffer = targets[i]->buffer;
@@ -1008,25 +1173,25 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                              (struct r600_resource*)buffer,
                                              buffers->shader_usage, buffers->priority);
-                       buffers->desc.enabled_mask |= 1llu << bufidx;
+                       buffers->desc.enabled_mask |= 1u << bufidx;
                } else {
                        /* Clear the descriptor and unset the resource. */
                        memset(buffers->desc.list + bufidx*4, 0,
                               sizeof(uint32_t) * 4);
                        pipe_resource_reference(&buffers->buffers[bufidx],
                                                NULL);
-                       buffers->desc.enabled_mask &= ~(1llu << bufidx);
+                       buffers->desc.enabled_mask &= ~(1u << bufidx);
                }
+               buffers->desc.dirty_mask |= 1u << bufidx;
        }
        for (; i < old_num_targets; i++) {
-               bufidx = SI_SO_BUF_OFFSET + i;
+               bufidx = SI_VS_STREAMOUT_BUF0 + i;
                /* Clear the descriptor and unset the resource. */
                memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
                pipe_resource_reference(&buffers->buffers[bufidx], NULL);
-               buffers->desc.enabled_mask &= ~(1llu << bufidx);
+               buffers->desc.enabled_mask &= ~(1u << bufidx);
+               buffers->desc.dirty_mask |= 1u << bufidx;
        }
-
-       buffers->desc.list_dirty = true;
 }
 
 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
@@ -1048,6 +1213,26 @@ static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
                  S_008F04_BASE_ADDRESS_HI(va >> 32);
 }
 
+/* INTERNAL CONST BUFFERS */
+
+static void si_set_polygon_stipple(struct pipe_context *ctx,
+                                  const struct pipe_poly_stipple *state)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct pipe_constant_buffer cb = {};
+       unsigned stipple[32];
+       int i;
+
+       for (i = 0; i < 32; i++)
+               stipple[i] = util_bitreverse(state->stipple[i]);
+
+       cb.user_buffer = stipple;
+       cb.buffer_size = sizeof(stipple);
+
+       si_set_constant_buffer(sctx, &sctx->rw_buffers,
+                              SI_PS_CONST_POLY_STIPPLE, &cb);
+}
+
 /* TEXTURE METADATA ENABLE/DISABLE */
 
 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
@@ -1070,15 +1255,15 @@ static void si_reset_buffer_resources(struct si_context *sctx,
                                      struct pipe_resource *buf,
                                      uint64_t old_va)
 {
-       uint64_t mask = buffers->desc.enabled_mask;
+       unsigned mask = buffers->desc.enabled_mask;
 
        while (mask) {
-               unsigned i = u_bit_scan64(&mask);
+               unsigned i = u_bit_scan(&mask);
                if (buffers->buffers[i] == buf) {
                        si_desc_reset_buffer_offset(&sctx->b.b,
                                                    buffers->desc.list + i*4,
                                                    old_va, buf);
-                       buffers->desc.list_dirty = true;
+                       buffers->desc.dirty_mask |= 1u << i;
 
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                                (struct r600_resource *)buf,
@@ -1107,7 +1292,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
 
        /* Reallocate the buffer in the same pipe_resource. */
        r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
-                          alignment, TRUE);
+                          alignment);
 
        /* We changed the buffer, now we need to bind it where the old one
         * was bound. This consists of 2 things:
@@ -1119,7 +1304,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
        for (i = 0; i < num_elems; i++) {
                int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
 
-               if (vb >= Elements(sctx->vertex_buffer))
+               if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
                        continue;
                if (!sctx->vertex_buffer[vb].buffer)
                        continue;
@@ -1130,33 +1315,27 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
                }
        }
 
-       /* Read/Write buffers. */
-       for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
-               struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
-               uint64_t mask = buffers->desc.enabled_mask;
+       /* Streamout buffers. (other internal buffers can't be invalidated) */
+       for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
+               struct si_buffer_resources *buffers = &sctx->rw_buffers;
 
-               while (mask) {
-                       i = u_bit_scan64(&mask);
-                       if (buffers->buffers[i] == buf) {
-                               si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
-                                                           old_va, buf);
-                               buffers->desc.list_dirty = true;
+               if (buffers->buffers[i] != buf)
+                       continue;
 
-                               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                                     rbuffer, buffers->shader_usage,
-                                                     buffers->priority);
-
-                               if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
-                                       /* Update the streamout state. */
-                                       if (sctx->b.streamout.begin_emitted) {
-                                               r600_emit_streamout_end(&sctx->b);
-                                       }
-                                       sctx->b.streamout.append_bitmask =
-                                               sctx->b.streamout.enabled_mask;
-                                       r600_streamout_buffers_dirty(&sctx->b);
-                               }
-                       }
-               }
+               si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
+                                           old_va, buf);
+               buffers->desc.dirty_mask |= 1u << i;
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                         rbuffer, buffers->shader_usage,
+                                         buffers->priority);
+
+               /* Update the streamout state. */
+               if (sctx->b.streamout.begin_emitted)
+                       r600_emit_streamout_end(&sctx->b);
+               sctx->b.streamout.append_bitmask =
+                               sctx->b.streamout.enabled_mask;
+               r600_streamout_buffers_dirty(&sctx->b);
        }
 
        /* Constant and shader buffers. */
@@ -1176,16 +1355,16 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
        /* Texture buffers - update bindings. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_sampler_views *views = &sctx->samplers[shader].views;
-               uint64_t mask = views->desc.enabled_mask;
+               unsigned mask = views->desc.enabled_mask;
 
                while (mask) {
-                       unsigned i = u_bit_scan64(&mask);
+                       unsigned i = u_bit_scan(&mask);
                        if (views->views[i]->texture == buf) {
                                si_desc_reset_buffer_offset(ctx,
                                                            views->desc.list +
                                                            i * 16 + 4,
                                                            old_va, buf);
-                               views->desc.list_dirty = true;
+                               views->desc.dirty_mask |= 1u << i;
 
                                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                                      rbuffer, RADEON_USAGE_READ,
@@ -1203,10 +1382,13 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
                        unsigned i = u_bit_scan(&mask);
 
                        if (images->views[i].resource == buf) {
+                               if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+                                       si_mark_image_range_valid(&images->views[i]);
+
                                si_desc_reset_buffer_offset(
                                        ctx, images->desc.list + i * 8 + 4,
                                        old_va, buf);
-                               images->desc.list_dirty = true;
+                               images->desc.dirty_mask |= 1u << i;
 
                                radeon_add_to_buffer_list(
                                        &sctx->b, &sctx->b.gfx, rbuffer,
@@ -1217,13 +1399,52 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
        }
 }
 
+/* Update mutable image descriptor fields of all bound textures. */
+void si_update_all_texture_descriptors(struct si_context *sctx)
+{
+       unsigned shader;
+
+       for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
+               struct si_sampler_views *samplers = &sctx->samplers[shader].views;
+               struct si_images_info *images = &sctx->images[shader];
+               unsigned mask;
+
+               /* Images. */
+               mask = images->desc.enabled_mask;
+               while (mask) {
+                       unsigned i = u_bit_scan(&mask);
+                       struct pipe_image_view *view = &images->views[i];
+
+                       if (!view->resource ||
+                           view->resource->target == PIPE_BUFFER)
+                               continue;
+
+                       si_set_shader_image(sctx, images, i, view);
+               }
+
+               /* Sampler views. */
+               mask = samplers->desc.enabled_mask;
+               while (mask) {
+                       unsigned i = u_bit_scan(&mask);
+                       struct pipe_sampler_view *view = samplers->views[i];
+
+                       if (!view ||
+                           !view->texture ||
+                           view->texture->target == PIPE_BUFFER)
+                               continue;
+
+                       si_set_sampler_view(sctx, samplers, i,
+                                           samplers->views[i], true);
+               }
+       }
+}
+
 /* SHADER USER DATA */
 
 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
                                          unsigned shader)
 {
        sctx->const_buffers[shader].desc.pointer_dirty = true;
-       sctx->rw_buffers[shader].desc.pointer_dirty = true;
        sctx->shader_buffers[shader].desc.pointer_dirty = true;
        sctx->samplers[shader].views.desc.pointer_dirty = true;
        sctx->images[shader].desc.pointer_dirty = true;
@@ -1241,6 +1462,7 @@ static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_mark_shader_pointers_dirty(sctx, i);
        }
+       sctx->rw_buffers.desc.pointer_dirty = true;
 }
 
 /* Set a base register address for user data constants in the given shader.
@@ -1312,39 +1534,32 @@ static void si_emit_shader_pointer(struct si_context *sctx,
        desc->pointer_dirty = keep_dirty;
 }
 
-void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
+void si_emit_graphics_shader_userdata(struct si_context *sctx,
+                                      struct r600_atom *atom)
 {
        unsigned i;
        uint32_t *sh_base = sctx->shader_userdata.sh_base;
 
-       if (sctx->gs_shader.cso) {
-               /* The VS copy shader needs these for clipping, streamout, and rings. */
-               unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
-               unsigned i = PIPE_SHADER_VERTEX;
-
-               si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
-               si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
-
-               if (sctx->tes_shader.cso) {
-                       /* The TESSEVAL shader needs this for the ESGS ring buffer. */
-                       si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
-                                              R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
-               }
-       } else if (sctx->tes_shader.cso) {
-               /* The TESSEVAL shader needs this for streamout. */
-               si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
+       if (sctx->rw_buffers.desc.pointer_dirty) {
+               si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+                                      R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
+               si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
+               si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+                                      R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
+               si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+                                      R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
+               si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+                                      R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
+               sctx->rw_buffers.desc.pointer_dirty = false;
        }
 
-       for (i = 0; i < SI_NUM_SHADERS; i++) {
+       for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
                unsigned base = sh_base[i];
 
                if (!base)
                        continue;
 
-               if (i != PIPE_SHADER_TESS_EVAL)
-                       si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
-
                si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
@@ -1353,6 +1568,20 @@ void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
        si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
 }
 
+void si_emit_compute_shader_userdata(struct si_context *sctx)
+{
+       unsigned base = R_00B900_COMPUTE_USER_DATA_0;
+
+       si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
+                              base, false);
+       si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
+                              base, false);
+       si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
+                              base, false);
+       si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
+                              base, false);
+}
+
 /* INIT/DEINIT/UPLOAD */
 
 void si_init_all_descriptors(struct si_context *sctx)
@@ -1365,10 +1594,6 @@ void si_init_all_descriptors(struct si_context *sctx)
                                         SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
                                         RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
                                         &ce_offset);
-               si_init_buffer_resources(&sctx->rw_buffers[i],
-                                        SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
-                                        RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
-                                        &ce_offset);
                si_init_buffer_resources(&sctx->shader_buffers[i],
                                         SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
                                         RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
@@ -1383,6 +1608,10 @@ void si_init_all_descriptors(struct si_context *sctx)
                                    null_image_descriptor, &ce_offset);
        }
 
+       si_init_buffer_resources(&sctx->rw_buffers,
+                                SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
+                                RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
+                                &ce_offset);
        si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
                            4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
 
@@ -1391,7 +1620,8 @@ void si_init_all_descriptors(struct si_context *sctx)
        /* Set pipe_context functions. */
        sctx->b.b.bind_sampler_states = si_bind_sampler_states;
        sctx->b.b.set_shader_images = si_set_shader_images;
-       sctx->b.b.set_constant_buffer = si_set_constant_buffer;
+       sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
+       sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
        sctx->b.b.set_shader_buffers = si_set_shader_buffers;
        sctx->b.b.set_sampler_views = si_set_sampler_views;
        sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
@@ -1399,7 +1629,7 @@ void si_init_all_descriptors(struct si_context *sctx)
 
        /* Shader user data. */
        si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
-                    si_emit_shader_userdata);
+                    si_emit_graphics_shader_userdata);
 
        /* Set default and immutable mappings. */
        si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
@@ -1408,19 +1638,39 @@ void si_init_all_descriptors(struct si_context *sctx)
        si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
 }
 
-bool si_upload_shader_descriptors(struct si_context *sctx)
+bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
 {
        int i;
 
        for (i = 0; i < SI_NUM_SHADERS; i++) {
-               if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
-                   !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
-                   !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc) ||
-                   !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
-                   !si_upload_descriptors(sctx, &sctx->images[i].desc))
+               if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
+                                          &sctx->shader_userdata.atom) ||
+                   !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
+                                          &sctx->shader_userdata.atom) ||
+                   !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
+                                          &sctx->shader_userdata.atom) ||
+                   !si_upload_descriptors(sctx, &sctx->images[i].desc,
+                                          &sctx->shader_userdata.atom))
                        return false;
        }
-       return si_upload_vertex_buffer_descriptors(sctx);
+       return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
+                                    &sctx->shader_userdata.atom) &&
+              si_upload_vertex_buffer_descriptors(sctx);
+}
+
+bool si_upload_compute_shader_descriptors(struct si_context *sctx)
+{
+       /* Does not update rw_buffers as that is not needed for compute shaders
+        * and the input buffer is using the same SGPRs anyway.
+        */
+       return si_upload_descriptors(sctx,
+                       &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
+              si_upload_descriptors(sctx,
+                      &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
+              si_upload_descriptors(sctx,
+                      &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
+              si_upload_descriptors(sctx,
+                      &sctx->images[PIPE_SHADER_COMPUTE].desc,  NULL);
 }
 
 void si_release_all_descriptors(struct si_context *sctx)
@@ -1429,11 +1679,11 @@ void si_release_all_descriptors(struct si_context *sctx)
 
        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_release_buffer_resources(&sctx->const_buffers[i]);
-               si_release_buffer_resources(&sctx->rw_buffers[i]);
                si_release_buffer_resources(&sctx->shader_buffers[i]);
                si_release_sampler_views(&sctx->samplers[i].views);
                si_release_image_views(&sctx->images[i]);
        }
+       si_release_buffer_resources(&sctx->rw_buffers);
        si_release_descriptors(&sctx->vertex_buffers);
 }
 
@@ -1443,11 +1693,11 @@ void si_all_descriptors_begin_new_cs(struct si_context *sctx)
 
        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
-               si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
                si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
                si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
                si_image_views_begin_new_cs(sctx, &sctx->images[i]);
        }
+       si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
        si_vertex_buffers_begin_new_cs(sctx);
        si_shader_userdata_begin_new_cs(sctx);
 }