r600/sfn: simplify UBO lowering pass
[mesa.git] src/gallium/drivers/r600/r600_state_common.c
index d1251469079ef301cc1c0c769ff7366651cc9214..76dd931413220436c37d5a4f2f8e21dc4e38e0f6 100644
@@ -28,7 +28,7 @@
 #include "r600_shader.h"
 #include "r600d.h"
 
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format_s3tc.h"
 #include "util/u_index_modify.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
 #include "tgsi/tgsi_scan.h"
 #include "tgsi/tgsi_ureg.h"
 
+#include "nir.h"
+#include "nir/nir_to_tgsi_info.h"
+#include "tgsi/tgsi_from_mesa.h"
+
 void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
 {
        assert(!cb->buf);
@@ -77,7 +81,7 @@ void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
 
 void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
        unsigned alpha_ref = a->sx_alpha_ref;
 
@@ -91,6 +95,33 @@ void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom
        radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
 }
 
+static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
+{
+       struct r600_context *rctx = (struct r600_context *)ctx;
+
+       if (!(flags & ~PIPE_BARRIER_UPDATE))
+               return;
+
+       if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
+               rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
+
+       if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
+                    PIPE_BARRIER_SHADER_BUFFER |
+                    PIPE_BARRIER_TEXTURE |
+                    PIPE_BARRIER_IMAGE |
+                    PIPE_BARRIER_STREAMOUT_BUFFER |
+                    PIPE_BARRIER_GLOBAL_BUFFER)) {
+               rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE|
+                       R600_CONTEXT_INV_TEX_CACHE;
+       }
+
+       if (flags & (PIPE_BARRIER_FRAMEBUFFER|
+                    PIPE_BARRIER_IMAGE))
+               rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV;
+
+       rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
+}
+
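
The added r600_memory_barrier maps Gallium barrier bits onto the driver's cache flags: constant-buffer barriers invalidate the constant cache, anything read through the vertex or texture caches invalidates both, framebuffer/image writes force a full flush-and-invalidate, and every path ends with a 3D-idle wait. A minimal standalone sketch of that mapping, using stand-in constants rather than the real PIPE_BARRIER_*/R600_CONTEXT_* definitions:

#include <stdio.h>

/* Stand-in bit values; the real flags live in the Gallium and r600 headers. */
enum {
        BARRIER_CONSTANT_BUFFER = 1 << 0,
        BARRIER_VERTEX_BUFFER   = 1 << 1,
        BARRIER_TEXTURE         = 1 << 2,
        BARRIER_FRAMEBUFFER     = 1 << 3,
};

enum {
        INV_CONST_CACHE  = 1 << 0,
        INV_VERTEX_CACHE = 1 << 1,
        INV_TEX_CACHE    = 1 << 2,
        FLUSH_AND_INV    = 1 << 3,
        WAIT_3D_IDLE     = 1 << 4,
};

static unsigned barrier_to_ctx_flags(unsigned barrier)
{
        unsigned flags = 0;

        if (barrier & BARRIER_CONSTANT_BUFFER)
                flags |= INV_CONST_CACHE;
        /* Anything read through the vertex/texture caches invalidates both. */
        if (barrier & (BARRIER_VERTEX_BUFFER | BARRIER_TEXTURE))
                flags |= INV_VERTEX_CACHE | INV_TEX_CACHE;
        /* Render-target/image writes need the full flush-and-invalidate. */
        if (barrier & BARRIER_FRAMEBUFFER)
                flags |= FLUSH_AND_INV;
        /* The real function always adds the idle wait at the end. */
        return flags | WAIT_3D_IDLE;
}

int main(void)
{
        printf("texture barrier -> ctx flags 0x%x\n",
               barrier_to_ctx_flags(BARRIER_TEXTURE));
        return 0;
}
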
 static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
@@ -99,6 +130,7 @@ static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
                       R600_CONTEXT_FLUSH_AND_INV_CB |
                       R600_CONTEXT_FLUSH_AND_INV |
                       R600_CONTEXT_WAIT_3D_IDLE;
+       rctx->framebuffer.do_update_surf_dirtiness = true;
 }
 
 static unsigned r600_conv_pipe_prim(unsigned prim)
@@ -217,7 +249,7 @@ static void r600_set_blend_color(struct pipe_context *ctx,
 
 void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct pipe_blend_color *state = &rctx->blend_color.state;
 
        radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
@@ -229,7 +261,7 @@ void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
 
 void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
 
        radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
@@ -263,7 +295,7 @@ static void r600_set_stencil_ref(struct pipe_context *ctx,
 
 void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
 
        radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
@@ -391,7 +423,7 @@ static void r600_sampler_view_destroy(struct pipe_context *ctx,
 
        if (view->tex_resource->gpu_address &&
            view->tex_resource->b.b.target == PIPE_BUFFER)
-               LIST_DELINIT(&view->list);
+               list_delinit(&view->list);
 
        pipe_resource_reference(&state->texture, NULL);
        FREE(view);
@@ -518,24 +550,11 @@ static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
 static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
 {
        struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
-       r600_resource_reference(&shader->buffer, NULL);
+       if (shader)
+               r600_resource_reference(&shader->buffer, NULL);
        FREE(shader);
 }
 
-static void r600_set_index_buffer(struct pipe_context *ctx,
-                          const struct pipe_index_buffer *ib)
-{
-       struct r600_context *rctx = (struct r600_context *)ctx;
-
-       if (ib) {
-               pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
-               memcpy(&rctx->index_buffer, ib, sizeof(*ib));
-               r600_context_add_resource_size(ctx, ib->buffer);
-       } else {
-               pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
-       }
-}
-
 void r600_vertex_buffers_dirty(struct r600_context *rctx)
 {
        if (rctx->vertex_buffer_state.dirty_mask) {
@@ -560,22 +579,25 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
        /* Set vertex buffers. */
        if (input) {
                for (i = 0; i < count; i++) {
-                       if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
-                               if (input[i].buffer) {
+                       if ((input[i].buffer.resource != vb[i].buffer.resource) ||
+                           (vb[i].stride != input[i].stride) ||
+                           (vb[i].buffer_offset != input[i].buffer_offset) ||
+                           (vb[i].is_user_buffer != input[i].is_user_buffer)) {
+                               if (input[i].buffer.resource) {
                                        vb[i].stride = input[i].stride;
                                        vb[i].buffer_offset = input[i].buffer_offset;
-                                       pipe_resource_reference(&vb[i].buffer, input[i].buffer);
+                                       pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
                                        new_buffer_mask |= 1 << i;
-                                       r600_context_add_resource_size(ctx, input[i].buffer);
+                                       r600_context_add_resource_size(ctx, input[i].buffer.resource);
                                } else {
-                                       pipe_resource_reference(&vb[i].buffer, NULL);
+                                       pipe_resource_reference(&vb[i].buffer.resource, NULL);
                                        disable_mask |= 1 << i;
                                }
                        }
                }
        } else {
                for (i = 0; i < count; i++) {
-                       pipe_resource_reference(&vb[i].buffer, NULL);
+                       pipe_resource_reference(&vb[i].buffer.resource, NULL);
                }
                disable_mask = ((1ull << count) - 1);
        }
@@ -711,6 +733,58 @@ static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *
        }
 }
 
+static int r600_get_hw_atomic_count(const struct pipe_context *ctx,
+                                   enum pipe_shader_type shader)
+{
+       const struct r600_context *rctx = (struct r600_context *)ctx;
+       int value = 0;
+       switch (shader) {
+       case PIPE_SHADER_FRAGMENT:
+       case PIPE_SHADER_COMPUTE:
+       default:
+               break;
+       case PIPE_SHADER_VERTEX:
+               value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+               break;
+       case PIPE_SHADER_GEOMETRY:
+               value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+                       rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+               break;
+       case PIPE_SHADER_TESS_EVAL:
+               value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+                       rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+                       (rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0);
+               break;
+       case PIPE_SHADER_TESS_CTRL:
+               value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+                       rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+                       (rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0) +
+                       rctx->tes_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+               break;
+       }
+       return value;
+}
+
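
The switch above is a running sum in the fixed hardware slot order PS, VS, GS, TES: each stage's first atomic-counter index is the total declared by the stages placed before it. A standalone sketch with made-up per-stage counts:

#include <stdio.h>

/* Fixed slot order used by the driver: PS first, then VS, GS, TES. */
enum stage { PS, VS, GS, TES, NUM_STAGES };

int main(void)
{
        /* Hypothetical number of HW atomic counters each stage declares. */
        const int count[NUM_STAGES] = { [PS] = 2, [VS] = 1, [GS] = 3, [TES] = 0 };
        const char *name[NUM_STAGES] = { "PS", "VS", "GS", "TES" };

        int first = 0;
        for (int s = 0; s < NUM_STAGES; s++) {
                /* first_atomic_counter for this stage is the running sum. */
                printf("%-3s: first_atomic_counter = %d (declares %d)\n",
                       name[s], first, count[s]);
                first += count[s];
        }
        return 0; /* PS=0, VS=2, GS=3, TES=6 */
}
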
+static void r600_update_compressed_colortex_mask_images(struct r600_image_state *images)
+{
+       uint32_t mask = images->enabled_mask;
+
+       while (mask) {
+               unsigned i = u_bit_scan(&mask);
+               struct pipe_resource *res = images->views[i].base.resource;
+
+               if (res && res->target != PIPE_BUFFER) {
+                       struct r600_texture *rtex = (struct r600_texture *)res;
+
+                       if (rtex->cmask.size) {
+                               images->compressed_colortex_mask |= 1 << i;
+                       } else {
+                               images->compressed_colortex_mask &= ~(1 << i);
+                       }
+               }
+       }
+}
+
 /* Compute the key for the hw shader variant */
 static inline void r600_shader_selector_key(const struct pipe_context *ctx,
                const struct r600_pipe_shader_selector *sel,
@@ -729,11 +803,17 @@ static inline void r600_shader_selector_key(const struct pipe_context *ctx,
                        key->vs.as_gs_a = true;
                        key->vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
                }
+               key->vs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_VERTEX);
                break;
        }
        case PIPE_SHADER_GEOMETRY:
+               key->gs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_GEOMETRY);
+               key->gs.tri_strip_adj_fix = rctx->gs_tri_strip_adj_fix;
                break;
        case PIPE_SHADER_FRAGMENT: {
+               if (rctx->ps_shader->info.images_declared)
+                       key->ps.image_size_const_offset = util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask);
+               key->ps.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_FRAGMENT);
                key->ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
                key->ps.alpha_to_one = rctx->alpha_to_one &&
                                      rctx->rasterizer && rctx->rasterizer->multisample_enable &&
@@ -746,9 +826,13 @@ static inline void r600_shader_selector_key(const struct pipe_context *ctx,
        }
        case PIPE_SHADER_TESS_EVAL:
                key->tes.as_es = (rctx->gs_shader != NULL);
+               key->tes.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_EVAL);
                break;
        case PIPE_SHADER_TESS_CTRL:
                key->tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+               key->tcs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_CTRL);
+               break;
+       case PIPE_SHADER_COMPUTE:
                break;
        default:
                assert(0);
@@ -757,7 +841,7 @@ static inline void r600_shader_selector_key(const struct pipe_context *ctx,
 
 /* Select the hw shader variant depending on the current state.
  * (*dirty) is set to 1 if current variant was changed */
-static int r600_shader_select(struct pipe_context *ctx,
+int r600_shader_select(struct pipe_context *ctx,
         struct r600_pipe_shader_selector* sel,
         bool *dirty)
 {
@@ -825,17 +909,39 @@ static int r600_shader_select(struct pipe_context *ctx,
        return 0;
 }
 
+struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
+                                                                 const void *prog, enum pipe_shader_ir ir,
+                                                                 unsigned pipe_shader_type)
+{
+       struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
+
+       sel->type = pipe_shader_type;
+       if (ir == PIPE_SHADER_IR_TGSI) {
+               sel->tokens = tgsi_dup_tokens((const struct tgsi_token *)prog);
+               tgsi_scan_shader(sel->tokens, &sel->info);
+       } else if (ir == PIPE_SHADER_IR_NIR){
+               sel->nir = nir_shader_clone(NULL, (const nir_shader *)prog);
+               nir_tgsi_scan_shader(sel->nir, &sel->info, true);
+       }
+       return sel;
+}
+
 static void *r600_create_shader_state(struct pipe_context *ctx,
                               const struct pipe_shader_state *state,
                               unsigned pipe_shader_type)
 {
-       struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
        int i;
-
-       sel->type = pipe_shader_type;
-       sel->tokens = tgsi_dup_tokens(state->tokens);
+       struct r600_pipe_shader_selector *sel;
+       
+       if (state->type == PIPE_SHADER_IR_TGSI)
+               sel = r600_create_shader_state_tokens(ctx, state->tokens, state->type, pipe_shader_type);
+       else if (state->type == PIPE_SHADER_IR_NIR) {
+               sel = r600_create_shader_state_tokens(ctx, state->ir.nir, state->type, pipe_shader_type);
+       } else
+               assert(0 && "Unknown shader type\n");
+       
+       sel->ir_type = state->type;
        sel->so = state->stream_output;
-       tgsi_scan_shader(state->tokens, &sel->info);
 
        switch (pipe_shader_type) {
        case PIPE_SHADER_GEOMETRY:
@@ -850,6 +956,7 @@ static void *r600_create_shader_state(struct pipe_context *ctx,
        case PIPE_SHADER_TESS_CTRL:
                sel->lds_patch_outputs_written_mask = 0;
                sel->lds_outputs_written_mask = 0;
+               bool texxcoord_semantic = ctx->screen->get_param(ctx->screen, PIPE_CAP_TGSI_TEXCOORD);
 
                for (i = 0; i < sel->info.num_outputs; i++) {
                        unsigned name = sel->info.output_semantic_name[i];
@@ -860,11 +967,11 @@ static void *r600_create_shader_state(struct pipe_context *ctx,
                        case TGSI_SEMANTIC_TESSOUTER:
                        case TGSI_SEMANTIC_PATCH:
                                sel->lds_patch_outputs_written_mask |=
-                                       1llu << r600_get_lds_unique_index(name, index);
+                                       1ull << r600_get_lds_unique_index(name, index, texxcoord_semantic);
                                break;
                        default:
                                sel->lds_outputs_written_mask |=
-                                       1llu << r600_get_lds_unique_index(name, index);
+                                       1ull << r600_get_lds_unique_index(name, index, texxcoord_semantic);
                        }
                }
                break;
@@ -931,24 +1038,31 @@ static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
 
-       if (!state)
+       if (!state || rctx->vs_shader == state)
                return;
 
        rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
        r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
-       rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
+
+        if (rctx->vs_shader->so.num_outputs)
+           rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
 }
 
 static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
 
+       if (state == rctx->gs_shader)
+               return;
+
        rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
        r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
 
        if (!state)
                return;
-       rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
+
+        if (rctx->gs_shader->so.num_outputs)
+           rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
 }
 
 static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
@@ -962,16 +1076,21 @@ static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
 
+       if (state == rctx->tes_shader)
+               return;
+
        rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
        r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
 
        if (!state)
                return;
-       rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
+
+        if (rctx->tes_shader->so.num_outputs)
+           rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
 }
 
-static void r600_delete_shader_selector(struct pipe_context *ctx,
-               struct r600_pipe_shader_selector *sel)
+void r600_delete_shader_selector(struct pipe_context *ctx,
+                                struct r600_pipe_shader_selector *sel)
 {
        struct r600_pipe_shader *p = sel->current, *c;
        while (p) {
@@ -981,7 +1100,14 @@ static void r600_delete_shader_selector(struct pipe_context *ctx,
                p = c;
        }
 
-       free(sel->tokens);
+       if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
+               free(sel->tokens);
+               /* We might have converted the TGSI shader to a NIR shader */
+               if (sel->nir)
+                       ralloc_free(sel->nir);
+       }
+       else if (sel->ir_type == PIPE_SHADER_IR_NIR)
+               ralloc_free(sel->nir);
        free(sel);
 }
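
The variant walk at the top of r600_delete_shader_selector is an ordinary singly-linked-list teardown: save the next pointer before freeing the node. A toy self-contained version (struct and field names are stand-ins for the real r600_pipe_shader):

#include <stdlib.h>

/* Toy stand-in for the variant chain owned by a shader selector. */
struct variant {
        struct variant *next_variant;
};

static void delete_variants(struct variant *p)
{
        while (p) {
                struct variant *c = p->next_variant;
                free(p);
                p = c;
        }
}

int main(void)
{
        struct variant *head = NULL;
        for (int i = 0; i < 3; i++) {
                struct variant *v = calloc(1, sizeof(*v));
                v->next_variant = head;
                head = v;
        }
        delete_variants(head);
        return 0;
}
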
 
@@ -1128,16 +1254,23 @@ static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask
        r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
 }
 
-static void r600_update_driver_const_buffers(struct r600_context *rctx)
+void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only)
 {
        int sh, size;
        void *ptr;
        struct pipe_constant_buffer cb;
-       for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+       int start, end;
+
+       start = compute_only ? PIPE_SHADER_COMPUTE : 0;
+       end = compute_only ? PIPE_SHADER_TYPES : PIPE_SHADER_COMPUTE;
+
+       for (sh = start; sh < end; sh++) {
                struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
                if (!info->vs_ucp_dirty &&
                    !info->texture_const_dirty &&
-                   !info->ps_sample_pos_dirty)
+                   !info->ps_sample_pos_dirty &&
+                   !info->tcs_default_levels_dirty &&
+                   !info->cs_block_grid_size_dirty)
                        continue;
 
                ptr = info->constants;
@@ -1153,7 +1286,7 @@ static void r600_update_driver_const_buffers(struct r600_context *rctx)
                        info->vs_ucp_dirty = false;
                }
 
-               if (info->ps_sample_pos_dirty) {
+               else if (info->ps_sample_pos_dirty) {
                        assert(sh == PIPE_SHADER_FRAGMENT);
                        if (!size) {
                                ptr = rctx->sample_positions;
@@ -1164,6 +1297,31 @@ static void r600_update_driver_const_buffers(struct r600_context *rctx)
                        info->ps_sample_pos_dirty = false;
                }
 
+               else if (info->cs_block_grid_size_dirty) {
+                       assert(sh == PIPE_SHADER_COMPUTE);
+                       if (!size) {
+                               ptr = rctx->cs_block_grid_sizes;
+                               size = R600_CS_BLOCK_GRID_SIZE;
+                       } else {
+                               memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
+                       }
+                       info->cs_block_grid_size_dirty = false;
+               }
+
+               else if (info->tcs_default_levels_dirty) {
+                       /*
+                        * We'd only really need this for the default tcs shader.
+                        */
+                       assert(sh == PIPE_SHADER_TESS_CTRL);
+                       if (!size) {
+                               ptr = rctx->tess_state;
+                               size = R600_TCS_DEFAULT_LEVELS_SIZE;
+                       } else {
+                               memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
+                       }
+                       info->tcs_default_levels_dirty = false;
+               }
+
                if (info->texture_const_dirty) {
                        assert (ptr);
                        assert (size);
@@ -1171,6 +1329,10 @@ static void r600_update_driver_const_buffers(struct r600_context *rctx)
                                memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
                        if (sh == PIPE_SHADER_FRAGMENT)
                                memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
+                       if (sh == PIPE_SHADER_COMPUTE)
+                               memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
+                       if (sh == PIPE_SHADER_TESS_CTRL)
+                               memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
                }
                info->texture_const_dirty = false;
 
@@ -1184,7 +1346,7 @@ static void r600_update_driver_const_buffers(struct r600_context *rctx)
 }
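
Each of the dirty sources above shares one allocation with the texture constants, hence the repeated pattern: if nothing has been allocated yet (size == 0), the small fixed source array is bound directly; once an allocation exists, the data must instead be copied into its reserved slot. A standalone sketch of that pattern with illustrative names and sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Pretend UCP data; R600_UCP_SIZE is larger in the real driver. */
        float ucp[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        float alloc[64];          /* stand-in for info->constants */
        unsigned alloc_size = 0;  /* 0: no texture constants allocated yet */

        const void *ptr;
        unsigned size;

        if (!alloc_size) {
                /* Nothing else lives in the buffer: bind the source directly. */
                ptr = ucp;
                size = sizeof(ucp);
        } else {
                /* Texture constants already occupy the allocation, so the
                 * UCP data is copied into its reserved slot at the front. */
                memcpy(alloc, ucp, sizeof(ucp));
                ptr = alloc;
                size = alloc_size;
        }
        printf("binding %u bytes (%s)\n", size,
               ptr == (void *)ucp ? "direct" : "copied");
        return 0;
}
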
 
 static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
-                                  int array_size, uint32_t *base_offset)
+                                  unsigned array_size, uint32_t *base_offset)
 {
        struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
        if (array_size + R600_UCP_SIZE > info->alloc_size) {
@@ -1219,7 +1381,7 @@ static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_ty
        samplers->views.dirty_buffer_constants = FALSE;
 
        bits = util_last_bit(samplers->views.enabled_mask);
-       array_size = bits * 8 * sizeof(uint32_t) * 4;
+       array_size = bits * 8 * sizeof(uint32_t);
 
        constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);
 
@@ -1242,41 +1404,64 @@ static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_ty
                        } else
                                constants[offset + 4] = 0;
 
-                       constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
+                       constants[offset + 5] = samplers->views.views[i]->base.u.buf.size /
+                                           util_format_get_blocksize(samplers->views.views[i]->base.format);
                        constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
                }
        }
 
 }
 
-/* On evergreen we store two values
- * 1. buffer size for TXQ
- * 2. number of cube layers in a cube map array.
+/* On evergreen we store one value:
+ * the number of cube layers in a cube map array.
  */
-static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
+void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
 {
        struct r600_textures_info *samplers = &rctx->samplers[shader_type];
-       int bits;
+       struct r600_image_state *images = NULL;
+       int bits, sview_bits, img_bits;
        uint32_t array_size;
        int i;
        uint32_t *constants;
        uint32_t base_offset;
-       if (!samplers->views.dirty_buffer_constants)
+
+       if (shader_type == PIPE_SHADER_FRAGMENT) {
+               images = &rctx->fragment_images;
+       } else if (shader_type == PIPE_SHADER_COMPUTE) {
+               images = &rctx->compute_images;
+       }
+
+       if (!samplers->views.dirty_buffer_constants &&
+           !(images && images->dirty_buffer_constants))
                return;
 
+       if (images)
+               images->dirty_buffer_constants = FALSE;
        samplers->views.dirty_buffer_constants = FALSE;
 
-       bits = util_last_bit(samplers->views.enabled_mask);
-       array_size = bits * 2 * sizeof(uint32_t) * 4;
+       bits = sview_bits = util_last_bit(samplers->views.enabled_mask);
+       if (images)
+               bits += util_last_bit(images->enabled_mask);
+       img_bits = bits;
+
+       array_size = bits * sizeof(uint32_t);
 
        constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
                                          &base_offset);
 
-       for (i = 0; i < bits; i++) {
+       for (i = 0; i < sview_bits; i++) {
                if (samplers->views.enabled_mask & (1 << i)) {
-                       uint32_t offset = (base_offset / 4) + i * 2;
-                       constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
-                       constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
+                       uint32_t offset = (base_offset / 4) + i;
+                       constants[offset] = samplers->views.views[i]->base.texture->array_size / 6;
+               }
+       }
+       if (images) {
+               for (i = sview_bits; i < img_bits; i++) {
+                       int idx = i - sview_bits;
+                       if (images->enabled_mask & (1 << idx)) {
+                               uint32_t offset = (base_offset / 4) + i;
+                               constants[offset] = images->views[idx].base.resource->array_size / 6;
+                       }
                }
        }
 }
@@ -1284,14 +1469,13 @@ static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type
 /* set sample xy locations as array of fragment shader constants */
 void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
 {
-       int i;
        struct pipe_context *ctx = &rctx->b.b;
 
        assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
        assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);
 
        memset(rctx->sample_positions, 0, 4 * 4 * 16);
-       for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
+       for (unsigned i = 0; i < rctx->framebuffer.nr_samples; i++) {
                ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
                /* Also fill in center-zeroed positions used for interpolateAtSample */
                rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
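
Each sample occupies four floats: the raw x/y position followed by center-relative copies (x - 0.5, y - 0.5) for interpolateAtSample. A small standalone sketch with a made-up two-sample pattern:

#include <stdio.h>

int main(void)
{
        /* Made-up 2-sample pattern in [0,1) pixel space. */
        const float pos[2][2] = { { 0.75f, 0.75f }, { 0.25f, 0.25f } };
        float constants[2][4];

        for (int i = 0; i < 2; i++) {
                constants[i][0] = pos[i][0];
                constants[i][1] = pos[i][1];
                /* Center-relative copies consumed by interpolateAtSample. */
                constants[i][2] = pos[i][0] - 0.5f;
                constants[i][3] = pos[i][1] - 0.5f;
                printf("sample %d: (%.2f, %.2f), center-relative (%.2f, %.2f)\n",
                       i, constants[i][0], constants[i][1],
                       constants[i][2], constants[i][3]);
        }
        return 0;
}
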
@@ -1369,10 +1553,12 @@ static void r600_update_clip_state(struct r600_context *rctx,
 {
        if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
            current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
+           current->shader.cull_dist_write != rctx->clip_misc_state.cull_dist_write ||
            current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
            current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
                rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
                rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
+               rctx->clip_misc_state.cull_dist_write = current->shader.cull_dist_write;
                rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
                rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
                r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
@@ -1390,11 +1576,11 @@ static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
 
        assert(!rctx->fixed_func_tcs_shader);
 
-       ureg_DECL_constant2D(ureg, 0, 3, R600_LDS_INFO_CONST_BUFFER);
-       const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 2),
-                                   R600_LDS_INFO_CONST_BUFFER);
-       const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 3),
-                                   R600_LDS_INFO_CONST_BUFFER);
+       ureg_DECL_constant2D(ureg, 0, 1, R600_BUFFER_INFO_CONST_BUFFER);
+       const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
+                                   R600_BUFFER_INFO_CONST_BUFFER);
+       const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
+                                   R600_BUFFER_INFO_CONST_BUFFER);
 
        tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
        tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
@@ -1407,6 +1593,159 @@ static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
                ureg_create_shader_and_destroy(ureg, &rctx->b.b);
 }
 
+void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only)
+{
+       unsigned i;
+       unsigned counter;
+
+       counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
+       if (counter != rctx->b.last_compressed_colortex_counter) {
+               rctx->b.last_compressed_colortex_counter = counter;
+
+               if (compute_only) {
+                       r600_update_compressed_colortex_mask(&rctx->samplers[PIPE_SHADER_COMPUTE].views);
+               } else {
+                       for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
+                               r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
+                       }
+               }
+               if (!compute_only)
+                       r600_update_compressed_colortex_mask_images(&rctx->fragment_images);
+               r600_update_compressed_colortex_mask_images(&rctx->compute_images);
+       }
+
+       /* Decompress textures if needed. */
+       for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+               struct r600_samplerview_state *views = &rctx->samplers[i].views;
+
+               if (compute_only)
+                       if (i != PIPE_SHADER_COMPUTE)
+                               continue;
+               if (views->compressed_depthtex_mask) {
+                       r600_decompress_depth_textures(rctx, views);
+               }
+               if (views->compressed_colortex_mask) {
+                       r600_decompress_color_textures(rctx, views);
+               }
+       }
+
+       {
+               struct r600_image_state *istate;
+
+               if (!compute_only) {
+                       istate = &rctx->fragment_images;
+                       if (istate->compressed_depthtex_mask)
+                               r600_decompress_depth_images(rctx, istate);
+                       if (istate->compressed_colortex_mask)
+                               r600_decompress_color_images(rctx, istate);
+               }
+
+               istate = &rctx->compute_images;
+               if (istate->compressed_depthtex_mask)
+                       r600_decompress_depth_images(rctx, istate);
+               if (istate->compressed_colortex_mask)
+                       r600_decompress_color_images(rctx, istate);
+       }
+}
+
+/* update MEM_SCRATCH buffers if needed */
+void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
+       struct r600_pipe_shader *shader, struct r600_scratch_buffer *scratch,
+       unsigned ring_base_reg, unsigned item_size_reg, unsigned ring_size_reg)
+{
+       unsigned num_ses = rctx->screen->b.info.max_se;
+       unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
+       unsigned nthreads = 128;
+
+       unsigned itemsize = shader->scratch_space_needed * 4;
+       unsigned size = align(itemsize * nthreads * num_pipes * num_ses * 4, 256);
+
+       if (scratch->dirty ||
+               unlikely(shader->scratch_space_needed != scratch->item_size ||
+               size > scratch->size)) {
+               struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+
+               scratch->dirty = false;
+
+               if (size > scratch->size) {
+                       // Release prior one if any
+                       if (scratch->buffer) {
+                               pipe_resource_reference((struct pipe_resource**)&scratch->buffer, NULL);
+                       }
+
+                       scratch->buffer = (struct r600_resource *)pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
+                               PIPE_USAGE_DEFAULT, size);
+                       if (scratch->buffer) {
+                               scratch->size = size;
+                       }
+               }
+
+               scratch->item_size = shader->scratch_space_needed;
+
+               radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+
+               // multi-SE chips need programming per SE
+               for (unsigned se = 0; se < num_ses; se++) {
+                       struct r600_resource *rbuffer = scratch->buffer;
+                       unsigned size_per_se = size / num_ses;
+
+                       // Direct to particular SE
+                       if (num_ses > 1) {
+                               radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
+                                       S_0802C_INSTANCE_INDEX(0) |
+                                       S_0802C_SE_INDEX(se) |
+                                       S_0802C_INSTANCE_BROADCAST_WRITES(1) |
+                                       S_0802C_SE_BROADCAST_WRITES(0));
+                       }
+
+                       radeon_set_config_reg(cs, ring_base_reg, (rbuffer->gpu_address + size_per_se * se) >> 8);
+                       radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+                       radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+                               RADEON_USAGE_READWRITE,
+                               RADEON_PRIO_SCRATCH_BUFFER));
+                       radeon_set_context_reg(cs, item_size_reg, itemsize);
+                       radeon_set_config_reg(cs, ring_size_reg, size_per_se >> 8);
+               }
+
+               // Restore broadcast mode
+               if (num_ses > 1) {
+                       radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
+                               S_0802C_INSTANCE_INDEX(0) |
+                               S_0802C_SE_INDEX(0) |
+                               S_0802C_INSTANCE_BROADCAST_WRITES(1) |
+                               S_0802C_SE_BROADCAST_WRITES(1));
+               }
+
+               radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+       }
+}
+
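
The sizing at the top of the function converts the shader's per-thread scratch requirement from dwords to bytes (itemsize), then scales it by the 128 threads, the pipe and SE counts, and the additional factor of 4 from the code above, rounding up to a 256-byte multiple. A standalone sketch with made-up hardware counts:

#include <stdio.h>

static unsigned align_up(unsigned v, unsigned a)
{
        return (v + a - 1) & ~(a - 1);
}

int main(void)
{
        /* Made-up hardware counts; the real values come from radeon_info. */
        unsigned num_ses = 1, num_pipes = 8, nthreads = 128;
        unsigned scratch_space_needed = 5; /* dwords per thread */

        unsigned itemsize = scratch_space_needed * 4; /* bytes per thread */
        unsigned size = align_up(itemsize * nthreads * num_pipes *
                                 num_ses * 4, 256);

        printf("itemsize = %u bytes, ring size = %u bytes (%u per SE)\n",
               itemsize, size, size / num_ses);
        return 0; /* itemsize = 20, ring size = 81920 */
}
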
+void r600_setup_scratch_buffers(struct r600_context *rctx) {
+       static const struct {
+               unsigned ring_base;
+               unsigned item_size;
+               unsigned ring_size;
+       } regs[R600_NUM_HW_STAGES] = {
+               [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
+               [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
+               [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
+               [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE }
+       };
+
+       for (unsigned i = 0; i < R600_NUM_HW_STAGES; i++) {
+               struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
+
+               if (stage && unlikely(stage->scratch_space_needed)) {
+                       r600_setup_scratch_area_for_shader(rctx, stage,
+                               &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
+               }
+       }
+}
+
 #define SELECT_SHADER_OR_FAIL(x) do {                                  \
                r600_shader_select(ctx, rctx->x##_shader, &x##_dirty);  \
                if (unlikely(!rctx->x##_shader->current))               \
@@ -1447,30 +1786,8 @@ static bool r600_update_derived_state(struct r600_context *rctx)
        bool need_buf_const;
        struct r600_pipe_shader *clip_so_current = NULL;
 
-       if (!rctx->blitter->running) {
-               unsigned i;
-               unsigned counter;
-
-               counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
-               if (counter != rctx->b.last_compressed_colortex_counter) {
-                       rctx->b.last_compressed_colortex_counter = counter;
-
-                       for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
-                               r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
-                       }
-               }
-
-               /* Decompress textures if needed. */
-               for (i = 0; i < PIPE_SHADER_TYPES; i++) {
-                       struct r600_samplerview_state *views = &rctx->samplers[i].views;
-                       if (views->compressed_depthtex_mask) {
-                               r600_decompress_depth_textures(rctx, views);
-                       }
-                       if (views->compressed_colortex_mask) {
-                               r600_decompress_color_textures(rctx, views);
-                       }
-               }
-       }
+       if (!rctx->blitter->running)
+               r600_update_compressed_resource_state(rctx, false);
 
        SELECT_SHADER_OR_FAIL(ps);
 
@@ -1543,6 +1860,21 @@ static bool r600_update_derived_state(struct r600_context *rctx)
                }
        }
 
+       /*
+        * XXX: I believe there's some fatal flaw in the dirty state logic when
+        * enabling/disabling tes.
+        * VS/ES share all buffer/resource/sampler slots. If TES is enabled,
+        * it will therefore overwrite the VS slots. If it now gets disabled,
+        * the VS needs to rebind all buffer/resource/sampler slots - not only
+        * has TES overwritten the corresponding slots, but when the VS was
+        * operating as LS the things with corresponding dirty bits got bound
+        * to LS slots and won't reflect what is dirty as VS stage even if the
+        * TES didn't overwrite it. The story for re-enabled TES is similar.
+        * In any case, we're not allowed to submit any TES state when
+        * TES is disabled (the state tracker may not do this but this looks
+        * like an optimization to me, not something which can be relied on).
+        */
+
        /* Update clip misc state. */
        if (clip_so_current) {
                r600_update_clip_state(rctx, clip_so_current);
@@ -1553,8 +1885,10 @@ static bool r600_update_derived_state(struct r600_context *rctx)
                rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
                rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {
 
-               if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
+               if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs ||
+                   rctx->cb_misc_state.ps_color_export_mask != rctx->ps_shader->current->ps_color_export_mask) {
                        rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
+                       rctx->cb_misc_state.ps_color_export_mask = rctx->ps_shader->current->ps_color_export_mask;
                        r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
                }
 
@@ -1587,6 +1921,13 @@ static bool r600_update_derived_state(struct r600_context *rctx)
                r600_update_db_shader_control(rctx);
        }
 
+       /* For each shader stage that needs to spill, set up buffer for MEM_SCRATCH */
+       if (rctx->b.chip_class >= EVERGREEN) {
+               evergreen_setup_scratch_buffers(rctx);
+       } else {
+               r600_setup_scratch_buffers(rctx);
+       }
+
        /* on R600 we stuff masks + txq info into one constant buffer */
        /* on evergreen we only need a txq info one */
        if (rctx->ps_shader) {
@@ -1619,7 +1960,23 @@ static bool r600_update_derived_state(struct r600_context *rctx)
                }
        }
 
-       r600_update_driver_const_buffers(rctx);
+       if (rctx->tes_shader) {
+               assert(rctx->b.chip_class >= EVERGREEN);
+               need_buf_const = rctx->tes_shader->current->shader.uses_tex_buffers ||
+                                rctx->tes_shader->current->shader.has_txq_cube_array_z_comp;
+               if (need_buf_const) {
+                       eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_EVAL);
+               }
+               if (rctx->tcs_shader) {
+                       need_buf_const = rctx->tcs_shader->current->shader.uses_tex_buffers ||
+                                        rctx->tcs_shader->current->shader.has_txq_cube_array_z_comp;
+                       if (need_buf_const) {
+                               eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_CTRL);
+                       }
+               }
+       }
+
+       r600_update_driver_const_buffers(rctx, false);
 
        if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
                if (!r600_adjust_gprs(rctx)) {
@@ -1650,7 +2007,7 @@ static bool r600_update_derived_state(struct r600_context *rctx)
 
 void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_clip_misc_state *state = &rctx->clip_misc_state;
 
        radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
@@ -1659,35 +2016,76 @@ void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom
                                S_028810_CLIP_DISABLE(state->clip_disable));
        radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
                               state->pa_cl_vs_out_cntl |
-                              (state->clip_plane_enable & state->clip_dist_write));
+                              (state->clip_plane_enable & state->clip_dist_write) |
+                              (state->cull_dist_write << 8));
        /* reuse needs to be set off if we write oViewport */
        if (rctx->b.chip_class >= EVERGREEN)
                radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
                                       S_028AB4_REUSE_OFF(state->vs_out_viewport));
 }
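
The new cull_dist_write << 8 term relies on PA_CL_VS_OUT_CNTL packing the clip-distance enables into the low byte (which is why clip_plane_enable & clip_dist_write is OR'd in unshifted) and the cull-distance enables into the next byte. A tiny sketch of that packing with made-up masks:

#include <stdio.h>

int main(void)
{
        /* Made-up masks: the shader writes clip distances 0-1 and cull
         * distance 0, with four user clip planes enabled in the state. */
        unsigned clip_plane_enable = 0x0f;
        unsigned clip_dist_write   = 0x03;
        unsigned cull_dist_write   = 0x01;

        /* Clip-distance enables in bits 0-7, cull-distance enables in
         * bits 8-15. */
        unsigned reg = (clip_plane_enable & clip_dist_write) |
                       (cull_dist_write << 8);

        printf("PA_CL_VS_OUT_CNTL enable bits: 0x%04x\n", reg);
        return 0;
}
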
 
+/* rast_prim is the primitive type after GS. */
+static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
+{
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+       enum pipe_prim_type rast_prim = rctx->current_rast_prim;
+
+       /* Skip this if not rendering lines. */
+       if (rast_prim != PIPE_PRIM_LINES &&
+           rast_prim != PIPE_PRIM_LINE_LOOP &&
+           rast_prim != PIPE_PRIM_LINE_STRIP &&
+           rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
+           rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
+               return;
+
+       if (rast_prim == rctx->last_rast_prim)
+               return;
+
+       /* For lines, reset the stipple pattern at each primitive. Otherwise,
+        * reset the stipple pattern at each packet (line strips, line loops).
+        */
+       radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+                              S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) |
+                              (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
+       rctx->last_rast_prim = rast_prim;
+}
+
 static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
-       struct pipe_index_buffer ib = {};
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
+       bool has_user_indices = info->has_user_indices;
        uint64_t mask;
-       unsigned num_patches, dirty_tex_counter;
+       unsigned num_patches, dirty_tex_counter, index_offset = 0;
+       unsigned index_size = info->index_size;
        int index_bias;
+       struct r600_shader_atomic combined_atomics[8];
+       uint8_t atomic_used_mask;
 
-       if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) {
+       if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
                return;
        }
 
-       if (unlikely(!rctx->vs_shader || !rctx->ps_shader)) {
+       if (unlikely(!rctx->vs_shader)) {
+               assert(0);
+               return;
+       }
+       if (unlikely(!rctx->ps_shader &&
+                    (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
                assert(0);
                return;
        }
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
-               rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+       }
+
+       if (rctx->cmd_buf_is_compute) {
+               rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+               rctx->cmd_buf_is_compute = false;
        }
 
        /* Re-emit the framebuffer state if needed. */
@@ -1695,8 +2093,23 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
        if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
                rctx->b.last_dirty_tex_counter = dirty_tex_counter;
                r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
+               rctx->framebuffer.do_update_surf_dirtiness = true;
        }
 
+       if (rctx->gs_shader) {
+               /* Determine whether the GS triangle strip adjacency fix should
+                * be applied. Rotate every other triangle if
+                * - triangle strips with adjacency are fed to the GS and
+                * - primitive restart is disabled (the rotation doesn't help
+                *   when the restart occurs after an odd number of triangles).
+                */
+               bool gs_tri_strip_adj_fix =
+                       !rctx->tes_shader &&
+                       info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
+                       !info->primitive_restart;
+               if (gs_tri_strip_adj_fix != rctx->gs_tri_strip_adj_fix)
+                       rctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
+       }
        if (!r600_update_derived_state(rctx)) {
                /* useless to render because current rendering command
                 * can't be achieved
@@ -1704,18 +2117,19 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                return;
        }
 
-       if (info->indexed) {
-               /* Initialize the index buffer struct. */
-               pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
-               ib.user_buffer = rctx->index_buffer.user_buffer;
-               ib.index_size = rctx->index_buffer.index_size;
-               ib.offset = rctx->index_buffer.offset;
-               if (!info->indirect) {
-                       ib.offset += info->start * ib.index_size;
-               }
+       rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim
+               : (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
+               : info->mode;
+
+       if (rctx->b.chip_class >= EVERGREEN) {
+               evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask);
+       }
+
+       if (index_size) {
+               index_offset += info->start * index_size;
 
                /* Translate 8-bit indices to 16-bit. */
-               if (unlikely(ib.index_size == 1)) {
+               if (unlikely(index_size == 1)) {
                        struct pipe_resource *out_buffer = NULL;
                        unsigned out_offset;
                        void *ptr;
@@ -1727,12 +2141,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                        }
                        else {
                                /* Have to get start/count from indirect buffer, slow path ahead... */
-                               struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
+                               struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
                                unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
                                        PIPE_TRANSFER_READ);
                                if (data) {
-                                       data += info->indirect_offset / sizeof(unsigned);
-                                       start = data[2] * ib.index_size;
+                                       data += info->indirect->offset / sizeof(unsigned);
+                                       start = data[2] * index_size;
                                        count = data[0];
                                }
                                else {
@@ -1743,19 +2157,16 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 
                        u_upload_alloc(ctx->stream_uploader, start, count * 2,
                                        256, &out_offset, &out_buffer, &ptr);
-                       if (unlikely(!ptr)) {
-                               pipe_resource_reference(&ib.buffer, NULL);
+                       if (unlikely(!ptr))
                                return;
-                       }
 
                        util_shorten_ubyte_elts_to_userptr(
-                                               &rctx->b.b, &ib, 0, 0, ib.offset + start, count, ptr);
+                                               &rctx->b.b, info, 0, 0, index_offset, count, ptr);
 
-                       pipe_resource_reference(&ib.buffer, NULL);
-                       ib.user_buffer = NULL;
-                       ib.buffer = out_buffer;
-                       ib.offset = out_offset;
-                       ib.index_size = 2;
+                       indexbuf = out_buffer;
+                       index_offset = out_offset;
+                       index_size = 2;
+                       has_user_indices = false;
                }
 
                /* Upload the index buffer.
@@ -1763,13 +2174,14 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
                 * Indirect draws never use immediate indices.
                 * Note: Instanced rendering in combination with immediate indices hangs. */
-               if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect ||
+               if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
                                                 info->instance_count > 1 ||
-                                                info->count*ib.index_size > 20)) {
+                                                info->count*index_size > 20)) {
+                       indexbuf = NULL;
                        u_upload_data(ctx->stream_uploader, 0,
-                                      info->count * ib.index_size, 256,
-                                     ib.user_buffer, &ib.offset, &ib.buffer);
-                       ib.user_buffer = NULL;
+                                      info->count * index_size, 256,
+                                     info->index.user, &index_offset, &indexbuf);
+                       has_user_indices = false;
                }
                index_bias = info->index_bias;
        } else {
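
R600-class hardware cannot consume 8-bit indices, so this path rewrites a ubyte index buffer into a fresh 16-bit upload and points indexbuf/index_offset/index_size at the widened copy. The core of what util_shorten_ubyte_elts_to_userptr does here, minus the offset and bias handling, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void widen_ubyte_to_ushort(const uint8_t *in, uint16_t *out,
                                  unsigned count)
{
        for (unsigned i = 0; i < count; i++)
                out[i] = in[i];
}

int main(void)
{
        const uint8_t indices8[] = { 0, 1, 2, 2, 1, 3 };
        uint16_t indices16[6];

        widen_ubyte_to_ushort(indices8, indices16, 6);
        for (unsigned i = 0; i < 6; i++)
                printf("%d ", indices16[i]);
        printf("\n");
        return 0;
}
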
@@ -1797,7 +2209,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                evergreen_setup_tess_constants(rctx, info, &num_patches);
 
        /* Emit states. */
-       r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+       r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask));
        r600_flush_emit(rctx);
 
        mask = rctx->dirty_atoms;
@@ -1805,6 +2217,10 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
        }
 
+       if (rctx->b.chip_class >= EVERGREEN) {
+               evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask);
+       }
+               
        if (rctx->b.chip_class == CAYMAN) {
                /* Copied from radeonsi. */
                unsigned primgroup_size = 128; /* recommended without a GS */
@@ -1863,17 +2279,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 
        /* Update the primitive type. */
        if (rctx->last_primitive_type != info->mode) {
-               unsigned ls_mask = 0;
-
-               if (info->mode == PIPE_PRIM_LINES)
-                       ls_mask = 1;
-               else if (info->mode == PIPE_PRIM_LINE_STRIP ||
-                        info->mode == PIPE_PRIM_LINE_LOOP)
-                       ls_mask = 2;
-
-               radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
-                                      S_028A0C_AUTO_RESET_CNTL(ls_mask) |
-                                      (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
+               r600_emit_rasterizer_prim_state(rctx);
                radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
                                      r600_conv_pipe_prim(info->mode));
 
@@ -1885,7 +2291,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
                radeon_emit(cs, info->instance_count);
        } else {
-               uint64_t va = r600_resource(info->indirect)->gpu_address;
+               uint64_t va = r600_resource(info->indirect->buffer)->gpu_address;
                assert(rctx->b.chip_class >= EVERGREEN);
 
                // Invalidate so non-indirect draw calls reset this state
@@ -1899,26 +2305,26 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-                                                         (struct r600_resource*)info->indirect,
+                                                         (struct r600_resource*)info->indirect->buffer,
                                                          RADEON_USAGE_READ,
                                                           RADEON_PRIO_DRAW_INDIRECT));
        }
 
-       if (info->indexed) {
+       if (index_size) {
                radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
-               radeon_emit(cs, ib.index_size == 4 ?
+               radeon_emit(cs, index_size == 4 ?
                                (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
                                (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
 
-               if (ib.user_buffer) {
-                       unsigned size_bytes = info->count*ib.index_size;
+               if (has_user_indices) {
+                       unsigned size_bytes = info->count*index_size;
                        unsigned size_dw = align(size_bytes, 4) / 4;
                        radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
                        radeon_emit(cs, info->count);
                        radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
-                       radeon_emit_array(cs, ib.user_buffer, size_dw);
+                       radeon_emit_array(cs, info->index.user, size_dw);
                } else {
-                       uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
+                       uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
 
                        if (likely(!info->indirect)) {
                                radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
@@ -1928,12 +2334,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                                radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
                                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                                radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-                                                                         (struct r600_resource*)ib.buffer,
+                                                                         (struct r600_resource*)indexbuf,
                                                                          RADEON_USAGE_READ,
                                                                          RADEON_PRIO_INDEX_BUFFER));
                        }
                        else {
-                               uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;
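+                               /* Upper bound of indices the indirect draw
+                                * may fetch from the bound buffer. */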
+                               uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
 
                                radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
                                radeon_emit(cs, va);
@@ -1941,7 +2347,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 
                                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                                radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-                                                                         (struct r600_resource*)ib.buffer,
+                                                                         (struct r600_resource*)indexbuf,
                                                                          RADEON_USAGE_READ,
                                                                          RADEON_PRIO_INDEX_BUFFER));
 
@@ -1949,7 +2355,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                                radeon_emit(cs, max_size);
 
                                radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
-                               radeon_emit(cs, info->indirect_offset);
+                               radeon_emit(cs, info->indirect->offset);
                                radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
                        }
                }
@@ -1979,7 +2385,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                }
                else {
                        radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
-                       radeon_emit(cs, info->indirect_offset);
+                       radeon_emit(cs, info->indirect->offset);
                }
                radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
                                (info->count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
@@ -2003,32 +2409,43 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
        }
 
-       /* Set the depth buffer as dirty. */
-       if (rctx->framebuffer.state.zsbuf) {
-               struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
-               struct r600_texture *rtex = (struct r600_texture *)surf->texture;
 
-               rtex->dirty_level_mask |= 1 << surf->u.tex.level;
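+       /* Evergreen and later: write the post-draw atomic counter values
+        * back to their buffers. */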
+       if (rctx->b.chip_class >= EVERGREEN)
+               evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics, &atomic_used_mask);
 
-               if (rtex->surface.flags & RADEON_SURF_SBUFFER)
-                       rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
-       }
-       if (rctx->framebuffer.compressed_cb_mask) {
-               struct pipe_surface *surf;
-               struct r600_texture *rtex;
-               unsigned mask = rctx->framebuffer.compressed_cb_mask;
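+       /* When a trace buffer is bound, record a marker for this draw
+        * (debugging aid). */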
+       if (rctx->trace_buf)
+               eg_trace_emit(rctx);
 
-               do {
-                       unsigned i = u_bit_scan(&mask);
-                       surf = rctx->framebuffer.state.cbufs[i];
-                       rtex = (struct r600_texture*)surf->texture;
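+       /* Only walk the framebuffer surfaces when something (e.g. a texture
+        * barrier) flagged them as needing it. */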
+       if (rctx->framebuffer.do_update_surf_dirtiness) {
+               /* Set the depth buffer as dirty. */
+               if (rctx->framebuffer.state.zsbuf) {
+                       struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
+                       struct r600_texture *rtex = (struct r600_texture *)surf->texture;
 
                        rtex->dirty_level_mask |= 1 << surf->u.tex.level;
 
-               } while (mask);
+                       if (rtex->surface.has_stencil)
+                               rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
+               }
+               if (rctx->framebuffer.compressed_cb_mask) {
+                       struct pipe_surface *surf;
+                       struct r600_texture *rtex;
+                       unsigned mask = rctx->framebuffer.compressed_cb_mask;
+
+                       do {
+                               unsigned i = u_bit_scan(&mask);
+                               surf = rctx->framebuffer.state.cbufs[i];
+                               rtex = (struct r600_texture*)surf->texture;
+
+                               rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+
+                       } while (mask);
+               }
+               rctx->framebuffer.do_update_surf_dirtiness = false;
        }
 
-       pipe_resource_reference(&ib.buffer, NULL);
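+       /* Unreference only temporary index buffers (such as uploaded user
+        * indices), never the caller's resource. */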
+       if (index_size && indexbuf != info->index.resource)
+               pipe_resource_reference(&indexbuf, NULL);
        rctx->b.num_draw_calls++;
 }
 
@@ -2157,7 +2574,7 @@ bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
 void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
 {
 
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;
 
        if (!shader)
@@ -2261,6 +2678,8 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
                format = PIPE_FORMAT_A4R4_UNORM;
 
        desc = util_format_description(format);
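+       /* Bail out on formats that have no util_format description. */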
+       if (!desc)
+               goto out_unknown;
 
        /* Depth and stencil swizzling is handled separately. */
        if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
@@ -2380,10 +2799,6 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
        }
 
        if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
-               if (!util_format_s3tc_enabled) {
-                       goto out_unknown;
-               }
-
                switch (format) {
                case PIPE_FORMAT_DXT1_RGB:
                case PIPE_FORMAT_DXT1_RGBA:
@@ -2538,6 +2953,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen,
                        switch (desc->nr_channels) {
                        case 1:
                                result = FMT_8;
+                               is_srgb_valid = TRUE;
                                goto out_word4;
                        case 2:
                                result = FMT_8_8;
@@ -2627,6 +3043,8 @@ uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format forma
        const struct util_format_description *desc = util_format_description(format);
        int channel = util_format_get_first_non_void_channel(format);
        bool is_float;
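+       /* No description means an unsupported format; return an invalid
+        * colorformat. */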
+       if (!desc)
+               return ~0U;
 
 #define HAS_SIZE(x,y,z,w) \
        (desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
@@ -2802,7 +3220,7 @@ static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resourc
        mask = rctx->vertex_buffer_state.enabled_mask;
        while (mask) {
                i = u_bit_scan(&mask);
-               if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
+               if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
                        rctx->vertex_buffer_state.dirty_mask |= 1 << i;
                        r600_vertex_buffers_dirty(rctx);
                }
@@ -2865,9 +3283,27 @@ static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resourc
                        r600_sampler_views_dirty(rctx, state);
                }
        }
+
+       /* SSBOs: mark any buffer views of the invalidated resource dirty
+        * so they get rebound with the new backing storage. */
+       struct r600_image_state *istate = &rctx->fragment_buffers;
+       {
+               uint32_t mask = istate->enabled_mask;
+               bool found = false;
+               while (mask) {
+                       unsigned i = u_bit_scan(&mask);
+                       if (istate->views[i].base.resource == &rbuffer->b.b) {
+                               found = true;
+                               istate->dirty_mask |= 1 << i;
+                       }
+               }
+               if (found) {
+                       r600_mark_atom_dirty(rctx, &istate->atom);
+               }
+       }
+
 }
 
-static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable)
+static void r600_set_active_query_state(struct pipe_context *ctx, bool enable)
 {
        struct r600_context *rctx = (struct r600_context*)ctx;
 
@@ -2887,17 +3323,10 @@ static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable
        }
 }
 
-static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
-{
-       struct r600_context *rctx = (struct r600_context*)ctx;
-
-       r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
-}
-
 static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
                                    bool include_draw_vbo)
 {
-       r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo);
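+       /* No atomic counters on this path, so no extra CS space is
+        * reserved for them (final argument 0). */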
+       r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0);
 }
 
 /* keep this at the end of this file, please */
@@ -2935,14 +3364,14 @@ void r600_init_common_state_functions(struct r600_context *rctx)
        rctx->b.b.set_sample_mask = r600_set_sample_mask;
        rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
        rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
-       rctx->b.b.set_index_buffer = r600_set_index_buffer;
        rctx->b.b.set_sampler_views = r600_set_sampler_views;
        rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
+       rctx->b.b.memory_barrier = r600_memory_barrier;
        rctx->b.b.texture_barrier = r600_texture_barrier;
        rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
        rctx->b.b.set_active_query_state = r600_set_active_query_state;
+
        rctx->b.b.draw_vbo = r600_draw_vbo;
        rctx->b.invalidate_buffer = r600_invalidate_buffer;
-       rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
        rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
 }