r600g: cayman initial integer support
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index 4a2c7fe935c50e699244653b1d761fef568dacc2..6a313096f94918ed6ed5213439ad44dd51524d2d 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
  * Authors: Dave Airlie <airlied@redhat.com>
  *          Jerome Glisse <jglisse@redhat.com>
  */
-#include <util/u_memory.h>
-#include <util/u_format.h>
-#include <pipebuffer/pb_buffer.h>
+#include "util/u_blitter.h"
+#include "util/u_memory.h"
+#include "util/u_format.h"
+#include "pipebuffer/pb_buffer.h"
 #include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "r600_formats.h"
 #include "r600_pipe.h"
 #include "r600d.h"
 
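+/* Translate a gallium PIPE_PRIM_* primitive into the hardware VGT DI primitive
+ * type; adjacency primitives are not supported and make this return false. */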
+static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
+{
+       static const int prim_conv[] = {
+               V_008958_DI_PT_POINTLIST,
+               V_008958_DI_PT_LINELIST,
+               V_008958_DI_PT_LINELOOP,
+               V_008958_DI_PT_LINESTRIP,
+               V_008958_DI_PT_TRILIST,
+               V_008958_DI_PT_TRISTRIP,
+               V_008958_DI_PT_TRIFAN,
+               V_008958_DI_PT_QUADLIST,
+               V_008958_DI_PT_QUADSTRIP,
+               V_008958_DI_PT_POLYGON,
+               -1,
+               -1,
+               -1,
+               -1
+       };
+
+       *prim = prim_conv[pprim];
+       if (*prim == -1) {
+               fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
+               return false;
+       }
+       return true;
+}
+
 /* common state between evergreen and r600 */
 void r600_bind_blend_state(struct pipe_context *ctx, void *state)
 {
@@ -46,6 +76,21 @@ void r600_bind_blend_state(struct pipe_context *ctx, void *state)
        r600_context_pipe_state_set(&rctx->ctx, rstate);
 }
 
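+/* Binding DSA state also latches the alpha reference value, which is emitted
+ * later by r600_update_alpha_ref() when the derived state is updated. */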
+void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
+{
+       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+       struct r600_pipe_dsa *dsa = state;
+       struct r600_pipe_state *rstate;
+
+       if (state == NULL)
+               return;
+       rstate = &dsa->rstate;
+       rctx->states[rstate->id] = rstate;
+       rctx->alpha_ref = dsa->alpha_ref;
+       rctx->alpha_ref_dirty = true;
+       r600_context_pipe_state_set(&rctx->ctx, rstate);
+}
+
 void r600_bind_rs_state(struct pipe_context *ctx, void *state)
 {
        struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
@@ -54,14 +99,18 @@ void r600_bind_rs_state(struct pipe_context *ctx, void *state)
        if (state == NULL)
                return;
 
-       rctx->flatshade = rs->flatshade;
+       rctx->clamp_vertex_color = rs->clamp_vertex_color;
+       rctx->clamp_fragment_color = rs->clamp_fragment_color;
+
        rctx->sprite_coord_enable = rs->sprite_coord_enable;
+       rctx->two_side = rs->two_side;
+
        rctx->rasterizer = rs;
 
        rctx->states[rs->rstate.id] = &rs->rstate;
        r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);
 
-       if (rctx->family >= CHIP_CEDAR) {
+       if (rctx->chip_class >= EVERGREEN) {
                evergreen_polygon_offset_update(rctx);
        } else {
                r600_polygon_offset_update(rctx);
@@ -91,17 +140,6 @@ void r600_sampler_view_destroy(struct pipe_context *ctx,
        FREE(resource);
 }
 
-void r600_bind_state(struct pipe_context *ctx, void *state)
-{
-       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
-
-       if (state == NULL)
-               return;
-       rctx->states[rstate->id] = rstate;
-       r600_context_pipe_state_set(&rctx->ctx, rstate);
-}
-
 void r600_delete_state(struct pipe_context *ctx, void *state)
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
@@ -111,7 +149,7 @@ void r600_delete_state(struct pipe_context *ctx, void *state)
                rctx->states[rstate->id] = NULL;
        }
        for (int i = 0; i < rstate->nregs; i++) {
-               r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
+               pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
        }
        free(rstate);
 }
@@ -123,6 +161,9 @@ void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
 
        rctx->vertex_elements = v;
        if (v) {
+               u_vbuf_bind_vertex_elements(rctx->vbuf_mgr, state,
+                                               v->vmgr_elements);
+
                rctx->states[v->rstate.id] = &v->rstate;
                r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
        }
@@ -139,7 +180,8 @@ void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
        if (rctx->vertex_elements == state)
                rctx->vertex_elements = NULL;
 
-       r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL);
+       pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
+       u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
        FREE(state);
 }
 
@@ -149,140 +191,51 @@ void r600_set_index_buffer(struct pipe_context *ctx,
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
 
-       if (ib) {
-               pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
-               memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer));
-       } else {
-               pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
-               memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer));
-       }
-
-       /* TODO make this more like a state */
+       u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
 }
 
 void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
                             const struct pipe_vertex_buffer *buffers)
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct pipe_vertex_buffer *vbo;
-       unsigned max_index = ~0;
        int i;
 
+       /* Zero states. */
        for (i = 0; i < count; i++) {
-               vbo = (struct pipe_vertex_buffer*)&buffers[i];
-
-               pipe_resource_reference(&rctx->vertex_buffer[i].buffer, vbo->buffer);
-               pipe_resource_reference(&rctx->real_vertex_buffer[i], NULL);
-
-               if (!vbo->buffer) {
-                       /* Zero states. */
-                       if (rctx->family >= CHIP_CEDAR) {
+               if (!buffers[i].buffer) {
+                       if (rctx->chip_class >= EVERGREEN) {
                                evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
                        } else {
                                r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
                        }
-                       continue;
-               }
-
-               if (r600_is_user_buffer(vbo->buffer)) {
-                       rctx->any_user_vbs = TRUE;
-                       continue;
-               }
-
-               pipe_resource_reference(&rctx->real_vertex_buffer[i], vbo->buffer);
-
-               /* The stride of zero means we will be fetching only the first
-                * vertex, so don't care about max_index. */
-               if (!vbo->stride) {
-                       continue;
-               }
-
-               /* Update the maximum index. */
-               {
-                   unsigned vbo_max_index =
-                         (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride;
-                   max_index = MIN2(max_index, vbo_max_index);
                }
        }
-
-       for (; i < rctx->nreal_vertex_buffers; i++) {
-               pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL);
-               pipe_resource_reference(&rctx->real_vertex_buffer[i], NULL);
-
-               /* Zero states. */
-               if (rctx->family >= CHIP_CEDAR) {
+       for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
+               if (rctx->chip_class >= EVERGREEN) {
                        evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
                } else {
                        r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
                }
        }
 
-       memcpy(rctx->vertex_buffer, buffers, sizeof(struct pipe_vertex_buffer) * count);
-
-       rctx->nvertex_buffers = count;
-       rctx->nreal_vertex_buffers = count;
-       rctx->vb_max_index = max_index;
+       u_vbuf_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
 }
 
-
-#define FORMAT_REPLACE(what, withwhat) \
-       case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break
-
 void *r600_create_vertex_elements(struct pipe_context *ctx,
                                  unsigned count,
                                  const struct pipe_vertex_element *elements)
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
        struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);
-       enum pipe_format *format;
-       int i;
 
        assert(count < 32);
        if (!v)
                return NULL;
 
        v->count = count;
-       memcpy(v->elements, elements, count * sizeof(struct pipe_vertex_element));
-
-       for (i = 0; i < count; i++) {
-               v->hw_format[i] = v->elements[i].src_format;
-               format = &v->hw_format[i];
-
-               switch (*format) {
-               FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
-               FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
-               FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
-               FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
-
-               /* r600 doesn't seem to support 32_*SCALED, these formats
-                * aren't in D3D10 either. */
-               FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
-               FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
-               FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
-               FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
-
-               FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
-               FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
-               FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
-               FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
-
-               FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
-               FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
-               FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
-               FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
-
-               FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
-               FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
-               FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
-               FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
-               default:;
-               }
-               v->incompatible_layout =
-                       v->incompatible_layout ||
-                       v->elements[i].src_format != v->hw_format[i];
-
-               v->hw_format_size[i] = align(util_format_get_blocksize(v->hw_format[i]), 4);
-       }
+       v->vmgr_elements =
+               u_vbuf_create_vertex_elements(rctx->vbuf_mgr, count,
+                                                 elements, v->elements);
 
        if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
                FREE(v);
@@ -295,10 +248,13 @@ void *r600_create_vertex_elements(struct pipe_context *ctx,
 void *r600_create_shader_state(struct pipe_context *ctx,
                               const struct pipe_shader_state *state)
 {
-       struct r600_pipe_shader *shader =  CALLOC_STRUCT(r600_pipe_shader);
+       struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
        int r;
 
-       r =  r600_pipe_shader_create(ctx, shader, state->tokens);
+       shader->tokens = tgsi_dup_tokens(state->tokens);
+       shader->so = state->stream_output;
+
+       r =  r600_pipe_shader_create(ctx, shader);
        if (r) {
                return NULL;
        }
@@ -314,6 +270,9 @@ void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
        if (state) {
                r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
        }
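+       /* The VS and PS share one GPR pool; rebalance the split once both are bound. */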
+       if (rctx->ps_shader && rctx->vs_shader) {
+               r600_adjust_gprs(rctx);
+       }
 }
 
 void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
@@ -325,6 +284,9 @@ void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
        if (state) {
                r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);
        }
+       if (rctx->ps_shader && rctx->vs_shader) {
+               r600_adjust_gprs(rctx);
+       }
 }
 
 void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
@@ -336,6 +298,7 @@ void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
                rctx->ps_shader = NULL;
        }
 
+       free(shader->tokens);
        r600_pipe_shader_destroy(ctx, shader);
        free(shader);
 }
@@ -349,51 +312,33 @@ void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
                rctx->vs_shader = NULL;
        }
 
+       free(shader->tokens);
        r600_pipe_shader_destroy(ctx, shader);
        free(shader);
 }
 
-/* FIXME optimize away spi update when it's not needed */
-void r600_spi_update(struct r600_pipe_context *rctx)
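+/* Re-emit SX_ALPHA_REF with the value latched from the DSA state; with 16bpc
+ * export the low bits of the float-encoded reference are masked off. */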
+static void r600_update_alpha_ref(struct r600_pipe_context *rctx)
 {
-       struct r600_pipe_shader *shader = rctx->ps_shader;
+       unsigned alpha_ref;
        struct r600_pipe_state rstate;
-       struct r600_shader *rshader = &shader->shader;
-       unsigned i, tmp;
 
+       alpha_ref = rctx->alpha_ref;
        rstate.nregs = 0;
-       for (i = 0; i < rshader->ninput; i++) {
-               tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));
-
-               if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
-                   rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
-                   rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
-                       tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
-               }
-
-               if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
-                   rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
-                       tmp |= S_028644_PT_SPRITE_TEX(1);
-               }
-
-                if (rctx->family < CHIP_CEDAR) {
-                    if (rshader->input[i].centroid)
-                            tmp |= S_028644_SEL_CENTROID(1);
-
-                    if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
-                            tmp |= S_028644_SEL_LINEAR(1);
-                }
+       if (rctx->export_16bpc)
+               alpha_ref &= ~0x1FFF;
+       r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL, 0);
 
-               r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL);
-       }
        r600_context_pipe_state_set(&rctx->ctx, &rstate);
+       rctx->alpha_ref_dirty = false;
 }
 
 void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
                              struct pipe_resource *buffer)
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct r600_resource_buffer *rbuffer = r600_buffer(buffer);
+       struct r600_resource *rbuffer = r600_resource(buffer);
+       struct r600_pipe_resource_state *rstate;
+       uint64_t va_offset;
        uint32_t offset;
 
        /* Note that the state tracker can unbind constant buffers by
@@ -404,203 +349,435 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
        }
 
        r600_upload_const_buffer(rctx, &rbuffer, &offset);
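+       /* The ALU_CONST_CACHE_* registers take the buffer address in 256-byte units. */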
+       va_offset = r600_resource_va(ctx->screen, (void*)rbuffer);
+       va_offset += offset;
+       va_offset >>= 8;
 
        switch (shader) {
        case PIPE_SHADER_VERTEX:
                rctx->vs_const_buffer.nregs = 0;
                r600_pipe_state_add_reg(&rctx->vs_const_buffer,
-                                       R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
+                                       R_028180_ALU_CONST_BUFFER_SIZE_VS_0 + index * 4,
                                        ALIGN_DIVUP(buffer->width0 >> 4, 16),
-                                       0xFFFFFFFF, NULL);
+                                       0xFFFFFFFF, NULL, 0);
                r600_pipe_state_add_reg(&rctx->vs_const_buffer,
-                                       R_028980_ALU_CONST_CACHE_VS_0,
-                                       (r600_bo_offset(rbuffer->r.bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->r.bo);
+                                       R_028980_ALU_CONST_CACHE_VS_0 + index * 4,
+                                       va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
                r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
+
+               rstate = &rctx->vs_const_buffer_resource[index];
+               if (!rstate->id) {
+                       if (rctx->chip_class >= EVERGREEN) {
+                               evergreen_pipe_init_buffer_resource(rctx, rstate);
+                       } else {
+                               r600_pipe_init_buffer_resource(rctx, rstate);
+                       }
+               }
+
+               if (rctx->chip_class >= EVERGREEN) {
+                       evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
+                       evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
+               } else {
+                       r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
+                       r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
+               }
                break;
        case PIPE_SHADER_FRAGMENT:
                rctx->ps_const_buffer.nregs = 0;
                r600_pipe_state_add_reg(&rctx->ps_const_buffer,
                                        R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
                                        ALIGN_DIVUP(buffer->width0 >> 4, 16),
-                                       0xFFFFFFFF, NULL);
+                                       0xFFFFFFFF, NULL, 0);
                r600_pipe_state_add_reg(&rctx->ps_const_buffer,
                                        R_028940_ALU_CONST_CACHE_PS_0,
-                                       (r600_bo_offset(rbuffer->r.bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->r.bo);
+                                       va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
                r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
+
+               rstate = &rctx->ps_const_buffer_resource[index];
+               if (!rstate->id) {
+                       if (rctx->chip_class >= EVERGREEN) {
+                               evergreen_pipe_init_buffer_resource(rctx, rstate);
+                       } else {
+                               r600_pipe_init_buffer_resource(rctx, rstate);
+                       }
+               }
+               if (rctx->chip_class >= EVERGREEN) {
+                       evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
+                       evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
+               } else {
+                       r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
+                       r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
+               }
                break;
        default:
                R600_ERR("unsupported %d\n", shader);
                return;
        }
 
-       if (!rbuffer->user_buffer)
+       if (buffer != &rbuffer->b.b.b)
                pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
 }
 
+struct pipe_stream_output_target *
+r600_create_so_target(struct pipe_context *ctx,
+                     struct pipe_resource *buffer,
+                     unsigned buffer_offset,
+                     unsigned buffer_size)
+{
+       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+       struct r600_so_target *t;
+       void *ptr;
+
+       t = CALLOC_STRUCT(r600_so_target);
+       if (!t) {
+               return NULL;
+       }
+
+       t->b.reference.count = 1;
+       t->b.context = ctx;
+       pipe_resource_reference(&t->b.buffer, buffer);
+       t->b.buffer_offset = buffer_offset;
+       t->b.buffer_size = buffer_size;
+
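+       /* filled_size is a small GPU buffer tracking how much of this target has
+        * already been written by streamout; it starts out zeroed. */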
+       t->filled_size = (struct r600_resource*)
+               pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
+       ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
+       memset(ptr, 0, t->filled_size->buf->size);
+       rctx->ws->buffer_unmap(t->filled_size->buf);
+
+       return &t->b;
+}
+
+void r600_so_target_destroy(struct pipe_context *ctx,
+                           struct pipe_stream_output_target *target)
+{
+       struct r600_so_target *t = (struct r600_so_target*)target;
+       pipe_resource_reference(&t->b.buffer, NULL);
+       pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
+       FREE(t);
+}
+
+void r600_set_so_targets(struct pipe_context *ctx,
+                        unsigned num_targets,
+                        struct pipe_stream_output_target **targets,
+                        unsigned append_bitmask)
+{
+       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+       unsigned i;
+
+       /* Stop streamout. */
+       if (rctx->ctx.num_so_targets) {
+               r600_context_streamout_end(&rctx->ctx);
+       }
+
+       /* Set the new targets. */
+       for (i = 0; i < num_targets; i++) {
+               pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], targets[i]);
+       }
+       for (; i < rctx->ctx.num_so_targets; i++) {
+               pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], NULL);
+       }
+
+       rctx->ctx.num_so_targets = num_targets;
+       rctx->ctx.streamout_start = num_targets != 0;
+       rctx->ctx.streamout_append_bitmask = append_bitmask;
+}
+
 static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
 {
-       struct r600_pipe_state *rstate;
+       struct r600_pipe_resource_state *rstate;
        struct r600_resource *rbuffer;
        struct pipe_vertex_buffer *vertex_buffer;
-       unsigned i, offset;
+       unsigned i, count, offset;
 
        if (rctx->vertex_elements->vbuffer_need_offset) {
                /* one resource per vertex elements */
-               rctx->nvs_resource = rctx->vertex_elements->count;
+               count = rctx->vertex_elements->count;
        } else {
                /* bind vertex buffer once */
-               rctx->nvs_resource = rctx->nreal_vertex_buffers;
+               count = rctx->vbuf_mgr->nr_real_vertex_buffers;
        }
 
-       for (i = 0 ; i < rctx->nvs_resource; i++) {
-               rstate = &rctx->vs_resource[i];
-               rstate->id = R600_PIPE_STATE_RESOURCE;
-               rstate->nregs = 0;
+       for (i = 0 ; i < count; i++) {
+               rstate = &rctx->fs_resource[i];
 
                if (rctx->vertex_elements->vbuffer_need_offset) {
                        /* one resource per vertex elements */
                        unsigned vbuffer_index;
                        vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
-                       vertex_buffer = &rctx->vertex_buffer[vbuffer_index];
-                       rbuffer = (struct r600_resource*)rctx->real_vertex_buffer[vbuffer_index];
+                       vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
+                       rbuffer = (struct r600_resource*)vertex_buffer->buffer;
                        offset = rctx->vertex_elements->vbuffer_offset[i];
                } else {
                        /* bind vertex buffer once */
-                       vertex_buffer = &rctx->vertex_buffer[i];
-                       rbuffer = (struct r600_resource*)rctx->real_vertex_buffer[i];
+                       vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[i];
+                       rbuffer = (struct r600_resource*)vertex_buffer->buffer;
                        offset = 0;
                }
                if (vertex_buffer == NULL || rbuffer == NULL)
                        continue;
-               offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);
+               offset += vertex_buffer->buffer_offset;
 
-               if (rctx->family >= CHIP_CEDAR) {
-                       evergreen_pipe_add_vertex_attrib(rctx, rstate, i,
-                                                        rbuffer, offset,
-                                                        vertex_buffer->stride);
+               if (!rstate->id) {
+                       if (rctx->chip_class >= EVERGREEN) {
+                               evergreen_pipe_init_buffer_resource(rctx, rstate);
+                       } else {
+                               r600_pipe_init_buffer_resource(rctx, rstate);
+                       }
+               }
+
+               if (rctx->chip_class >= EVERGREEN) {
+                       evergreen_pipe_mod_buffer_resource(&rctx->context, rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
+                       evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
                } else {
-                       r600_pipe_add_vertex_attrib(rctx, rstate, i,
-                                                   rbuffer, offset,
-                                                   vertex_buffer->stride);
+                       r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
+                       r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
                }
        }
 }
 
-void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+static int r600_shader_rebuild(struct pipe_context * ctx, struct r600_pipe_shader * shader)
 {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct r600_resource *rbuffer;
-       u32 vgt_dma_index_type, vgt_draw_initiator, mask;
-       struct r600_draw rdraw;
-       struct r600_pipe_state vgt;
-       struct r600_drawl draw = {};
-       unsigned prim;
+       int r;
 
-       if (rctx->vertex_elements->incompatible_layout) {
-               r600_begin_vertex_translate(rctx);
+       r600_pipe_shader_destroy(ctx, shader);
+       r = r600_pipe_shader_create(ctx, shader);
+       if (r) {
+               return r;
        }
+       r600_context_pipe_state_set(&rctx->ctx, &shader->rstate);
+
+       return 0;
+}
+
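+/* Emit state derived from combinations of bound states (clip controls, alpha
+ * reference, depth flushes) and rebuild shader variants when the rasterizer or
+ * framebuffer state they depend on has changed. */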
+static void r600_update_derived_state(struct r600_pipe_context *rctx)
+{
+       struct pipe_context * ctx = (struct pipe_context*)rctx;
+       struct r600_pipe_state rstate;
+       unsigned user_clip_plane_enable;
+       unsigned clip_dist_enable;
+
+       if (rctx->vs_shader->shader.clip_dist_write || rctx->vs_shader->shader.vs_prohibit_ucps)
+               user_clip_plane_enable = 0;
+       else
+               user_clip_plane_enable = rctx->rasterizer->clip_plane_enable & 0x3F;
+
+       clip_dist_enable = rctx->rasterizer->clip_plane_enable & rctx->vs_shader->shader.clip_dist_write;
+       rstate.nregs = 0;
 
-       if (rctx->any_user_vbs) {
-               r600_upload_user_buffers(rctx, info->min_index, info->max_index);
+       if (user_clip_plane_enable != rctx->user_clip_plane_enable) {
+               r600_pipe_state_add_reg(&rstate, R_028810_PA_CL_CLIP_CNTL, user_clip_plane_enable , 0x3F, NULL, 0);
+               rctx->user_clip_plane_enable = user_clip_plane_enable;
        }
 
-       r600_vertex_buffer_update(rctx);
+       if (clip_dist_enable != rctx->clip_dist_enable) {
+               r600_pipe_state_add_reg(&rstate, R_02881C_PA_CL_VS_OUT_CNTL, clip_dist_enable, 0xFF, NULL, 0);
+               rctx->clip_dist_enable = clip_dist_enable;
+       }
 
-       draw.info = *info;
-       draw.ctx = ctx;
-       if (info->indexed && rctx->index_buffer.buffer) {
-               draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
+       if (rstate.nregs)
+               r600_context_pipe_state_set(&rctx->ctx, &rstate);
 
-               r600_translate_index_buffer(rctx, &rctx->index_buffer.buffer,
-                                           &rctx->index_buffer.index_size,
-                                           &draw.info.start,
-                                           info->count);
+       if (!rctx->blitter->running) {
+               if (rctx->have_depth_fb || rctx->have_depth_texture)
+                       r600_flush_depth_textures(rctx);
+       }
 
-               draw.index_size = rctx->index_buffer.index_size;
-               pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);
-               draw.index_buffer_offset = draw.info.start * draw.index_size;
-               draw.info.start = 0;
+       if (rctx->chip_class < EVERGREEN) {
+               r600_update_sampler_states(rctx);
+       }
 
-               if (r600_is_user_buffer(draw.index_buffer)) {
-                       r600_upload_index_buffer(rctx, &draw);
-               }
-       } else {
-               draw.info.index_bias = info->start;
+       if (rctx->vs_shader->shader.clamp_color != rctx->clamp_vertex_color) {
+               r600_shader_rebuild(&rctx->context, rctx->vs_shader);
        }
 
-       switch (draw.index_size) {
-       case 2:
-               vgt_draw_initiator = 0;
-               vgt_dma_index_type = 0;
-               break;
-       case 4:
-               vgt_draw_initiator = 0;
-               vgt_dma_index_type = 1;
-               break;
-       case 0:
-               vgt_draw_initiator = 2;
-               vgt_dma_index_type = 0;
-               break;
-       default:
-               R600_ERR("unsupported index size %d\n", draw.index_size);
-               return;
+       if ((rctx->ps_shader->shader.clamp_color != rctx->clamp_fragment_color) ||
+           (rctx->ps_shader->shader.two_side != rctx->two_side) ||
+           ((rctx->chip_class >= EVERGREEN) && rctx->ps_shader->shader.fs_write_all &&
+            (rctx->ps_shader->shader.nr_cbufs != rctx->nr_cbufs))) {
+               r600_shader_rebuild(&rctx->context, rctx->ps_shader);
        }
-       if (r600_conv_pipe_prim(draw.info.mode, &prim))
-               return;
-       if (unlikely(rctx->ps_shader == NULL)) {
-               R600_ERR("missing vertex shader\n");
-               return;
+
+       if (rctx->alpha_ref_dirty) {
+               r600_update_alpha_ref(rctx);
        }
-       if (unlikely(rctx->vs_shader == NULL)) {
-               R600_ERR("missing vertex shader\n");
-               return;
+
+       if (rctx->ps_shader && rctx->sprite_coord_enable &&
+               (rctx->ps_shader->sprite_coord_enable != rctx->sprite_coord_enable)) {
+
+               if (rctx->chip_class >= EVERGREEN)
+                       evergreen_pipe_shader_ps(ctx, rctx->ps_shader);
+               else
+                       r600_pipe_shader_ps(ctx, rctx->ps_shader);
+
+               r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
        }
-       /* there should be enough input */
-       if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
-               R600_ERR("%d resources provided, expecting %d\n",
-                       rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
+
+}
+
+void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
+{
+       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
+       struct pipe_draw_info info = *dinfo;
+       struct r600_draw rdraw = {};
+       struct pipe_index_buffer ib = {};
+       unsigned prim, mask, ls_mask = 0;
+
+       if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
+           (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
+           !r600_conv_pipe_prim(info.mode, &prim)) {
                return;
        }
 
-       r600_spi_update(rctx);
+       if (!rctx->ps_shader || !rctx->vs_shader)
+               return;
 
-       mask = 0;
-       for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
-               mask |= (0xF << (i * 4));
-       }
+       r600_update_derived_state(rctx);
 
-       vgt.id = R600_PIPE_STATE_VGT;
-       vgt.nregs = 0;
-       r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL);
-       r600_context_pipe_state_set(&rctx->ctx, &vgt);
+       u_vbuf_draw_begin(rctx->vbuf_mgr, &info);
+       r600_vertex_buffer_update(rctx);
+
+       rdraw.vgt_num_indices = info.count;
+       rdraw.vgt_num_instances = info.instance_count;
+
+       if (info.indexed) {
+               /* Initialize the index buffer struct. */
+               pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
+               ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
+               ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;
 
-       rdraw.vgt_num_indices = draw.info.count;
-       rdraw.vgt_num_instances = 1;
-       rdraw.vgt_index_type = vgt_dma_index_type;
-       rdraw.vgt_draw_initiator = vgt_draw_initiator;
-       rdraw.indices = NULL;
-       if (draw.index_buffer) {
-               rbuffer = (struct r600_resource*)draw.index_buffer;
-               rdraw.indices = rbuffer->bo;
-               rdraw.indices_bo_offset = draw.index_buffer_offset;
+               /* Translate or upload, if needed. */
+               r600_translate_index_buffer(rctx, &ib, info.count);
+
+               if (u_vbuf_resource(ib.buffer)->user_ptr) {
+                       r600_upload_index_buffer(rctx, &ib, info.count);
+               }
+
+               /* Initialize the r600_draw struct with index buffer info. */
+               if (ib.index_size == 4) {
+                       rdraw.vgt_index_type = VGT_INDEX_32 |
+                               (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0);
+               } else {
+                       rdraw.vgt_index_type = VGT_INDEX_16 |
+                               (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0);
+               }
+               rdraw.indices = (struct r600_resource*)ib.buffer;
+               rdraw.indices_bo_offset = ib.offset;
+               rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_DMA;
+       } else {
+               info.index_bias = info.start;
+               rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
+               if (info.count_from_stream_output) {
+                       rdraw.vgt_draw_initiator |= S_0287F0_USE_OPAQUE(1);
+
+                       r600_context_draw_opaque_count(&rctx->ctx, (struct r600_so_target*)info.count_from_stream_output);
+               }
        }
 
-       if (rctx->family >= CHIP_CEDAR) {
+       rctx->ctx.vs_so_stride_in_dw = rctx->vs_shader->so.stride;
+
+       mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;
+
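+       /* Build the VGT register list once; on later draws only the values are
+        * updated below with r600_pipe_state_mod_reg(), in the same register order. */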
+       if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
+               rctx->vgt.id = R600_PIPE_STATE_VGT;
+               rctx->vgt.nregs = 0;
+               r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, ~0, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, 0, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance, 0xFFFFFFFF, NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE,
+                                       0,
+                                       S_028A0C_AUTO_RESET_CNTL(3), NULL, 0);
+               r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL,
+                                       0,
+                                       S_028814_PROVOKING_VTX_LAST(1), NULL, 0);
+       }
+
+       rctx->vgt.nregs = 0;
+       r600_pipe_state_mod_reg(&rctx->vgt, prim);
+       r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
+       r600_pipe_state_mod_reg(&rctx->vgt, ~0);
+       r600_pipe_state_mod_reg(&rctx->vgt, 0);
+       r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
+       r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
+       r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
+       r600_pipe_state_mod_reg(&rctx->vgt, 0);
+       r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);
+
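+       /* Automatically reset the line stipple pattern for line lists and strips. */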
+       if (prim == V_008958_DI_PT_LINELIST)
+               ls_mask = 1;
+       else if (prim == V_008958_DI_PT_LINESTRIP)
+               ls_mask = 2;
+       r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask));
+
+       if (info.mode == PIPE_PRIM_QUADS || info.mode == PIPE_PRIM_QUAD_STRIP || info.mode == PIPE_PRIM_POLYGON) {
+               r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
+       }
+
+       r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);
+
+       if (rctx->chip_class >= EVERGREEN) {
                evergreen_context_draw(&rctx->ctx, &rdraw);
        } else {
                r600_context_draw(&rctx->ctx, &rdraw);
        }
 
-       pipe_resource_reference(&draw.index_buffer, NULL);
-
-       /* delete previous translated vertex elements */
-       if (rctx->tran.new_velems) {
-               r600_end_vertex_translate(rctx);
+       if (rctx->framebuffer.zsbuf) {
+               struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
+               ((struct r600_resource_texture *)tex)->dirty_db = TRUE;
        }
+
+       pipe_resource_reference(&ib.buffer, NULL);
+       u_vbuf_draw_end(rctx->vbuf_mgr);
+}
+
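+/* Append a register write to a pipe state, resolving its register block and
+ * block-relative index from the given range and block ids. */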
+void _r600_pipe_state_add_reg(struct r600_context *ctx,
+                             struct r600_pipe_state *state,
+                             u32 offset, u32 value, u32 mask,
+                             u32 range_id, u32 block_id,
+                             struct r600_resource *bo,
+                             enum radeon_bo_usage usage)
+{
+       struct r600_range *range;
+       struct r600_block *block;
+
+       if (bo) assert(usage);
+
+       range = &ctx->range[range_id];
+       block = range->blocks[block_id];
+       state->regs[state->nregs].block = block;
+       state->regs[state->nregs].id = (offset - block->start_offset) >> 2;
+
+       state->regs[state->nregs].value = value;
+       state->regs[state->nregs].mask = mask;
+       state->regs[state->nregs].bo = bo;
+       state->regs[state->nregs].bo_usage = usage;
+
+       state->nregs++;
+       assert(state->nregs < R600_BLOCK_MAX_REG);
+}
+
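+/* Like _r600_pipe_state_add_reg(), but stores the raw register offset and skips
+ * the block lookup. */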
+void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
+                                    u32 offset, u32 value, u32 mask,
+                                    struct r600_resource *bo,
+                                    enum radeon_bo_usage usage)
+{
+       if (bo) assert(usage);
+
+       state->regs[state->nregs].id = offset;
+       state->regs[state->nregs].block = NULL;
+       state->regs[state->nregs].value = value;
+       state->regs[state->nregs].mask = mask;
+       state->regs[state->nregs].bo = bo;
+       state->regs[state->nregs].bo_usage = usage;
+
+       state->nregs++;
+       assert(state->nregs < R600_BLOCK_MAX_REG);
 }