gallium: change pipe_sampler_view::first_element/last_element -> offset/size
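
Buffer sampler views are now described by a byte range in u.buf (offset/size)
instead of element indices (first_element/last_element); drivers read the two
fields directly, as the texture_buffer_sampler_view hunk below shows. A minimal
caller-side sketch for reference — variable names (pipe, buf, first_texel,
num_texels) are placeholders, only the u.buf fields come from this change:

    struct pipe_sampler_view templ = {0};
    templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
    /* byte-based range replaces first_element/last_element */
    templ.u.buf.offset = first_texel * util_format_get_blocksize(templ.format);
    templ.u.buf.size   = num_texels * util_format_get_blocksize(templ.format);
    view = pipe->create_sampler_view(pipe, buf, &templ);
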
diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c
index 1f933efca9ea8e6a2a521ddb3419b55efc89d8d6..62b1c2c282a8c2242522183fa5e9a67a974a3eb8 100644
--- a/src/gallium/drivers/r600/r600_state.c
+++ b/src/gallium/drivers/r600/r600_state.c
@@ -143,13 +143,14 @@ static uint32_t r600_translate_dbformat(enum pipe_format format)
 
 static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
 {
-       return r600_translate_texformat(screen, format, NULL, NULL, NULL) != ~0U;
+       return r600_translate_texformat(screen, format, NULL, NULL, NULL,
+                                   FALSE) != ~0U;
 }
 
 static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
 {
-       return r600_translate_colorformat(chip, format) != ~0U &&
-              r600_translate_colorswap(format) != ~0U;
+       return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
+              r600_translate_colorswap(format, FALSE) != ~0U;
 }
 
 static bool r600_is_zs_format_supported(enum pipe_format format)
@@ -239,32 +240,50 @@ boolean r600_is_format_supported(struct pipe_screen *screen,
        if (usage & PIPE_BIND_TRANSFER_WRITE)
                retval |= PIPE_BIND_TRANSFER_WRITE;
 
+       if ((usage & PIPE_BIND_LINEAR) &&
+           !util_format_is_compressed(format) &&
+           !(usage & PIPE_BIND_DEPTH_STENCIL))
+               retval |= PIPE_BIND_LINEAR;
+
        return retval == usage;
 }
 
 static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
        float offset_units = state->offset_units;
        float offset_scale = state->offset_scale;
-
-       switch (state->zs_format) {
-       case PIPE_FORMAT_Z24X8_UNORM:
-       case PIPE_FORMAT_Z24_UNORM_S8_UINT:
-               offset_units *= 2.0f;
-               break;
-       case PIPE_FORMAT_Z16_UNORM:
-               offset_units *= 4.0f;
-               break;
-       default:;
+       uint32_t pa_su_poly_offset_db_fmt_cntl = 0;
+
+       if (!state->offset_units_unscaled) {
+               switch (state->zs_format) {
+               case PIPE_FORMAT_Z24X8_UNORM:
+               case PIPE_FORMAT_Z24_UNORM_S8_UINT:
+                       offset_units *= 2.0f;
+                       pa_su_poly_offset_db_fmt_cntl =
+                               S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
+                       break;
+               case PIPE_FORMAT_Z16_UNORM:
+                       offset_units *= 4.0f;
+                       pa_su_poly_offset_db_fmt_cntl =
+                               S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
+                       break;
+               default:
+                       pa_su_poly_offset_db_fmt_cntl =
+                               S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
+                               S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
+               }
        }
 
-       r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
+       radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
        radeon_emit(cs, fui(offset_scale));
        radeon_emit(cs, fui(offset_units));
        radeon_emit(cs, fui(offset_scale));
        radeon_emit(cs, fui(offset_units));
+
+       radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
+                              pa_su_poly_offset_db_fmt_cntl);
 }
 
 static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
@@ -390,7 +409,7 @@ static void *r600_create_dsa_state(struct pipe_context *ctx,
        unsigned db_depth_control, alpha_test_control, alpha_ref;
        struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);
 
-       if (dsa == NULL) {
+       if (!dsa) {
                return NULL;
        }
 
@@ -446,12 +465,13 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
        float psize_min, psize_max;
        struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);
 
-       if (rs == NULL) {
+       if (!rs) {
                return NULL;
        }
 
        r600_init_command_buffer(&rs->buffer, 30);
 
+       rs->scissor_enable = state->scissor;
        rs->flatshade = state->flatshade;
        rs->sprite_coord_enable = state->sprite_coord_enable;
        rs->two_side = state->light_twoside;
@@ -460,7 +480,7 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
                                S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
                                S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
        rs->pa_cl_clip_cntl =
-               S_028810_PS_UCP_MODE(3) |
+               S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
                S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
                S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
                S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
@@ -472,8 +492,9 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
 
        /* offset */
        rs->offset_units = state->offset_units;
-       rs->offset_scale = state->offset_scale * 12.0f;
+       rs->offset_scale = state->offset_scale * 16.0f;
        rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
+       rs->offset_units_unscaled = state->offset_units_unscaled;
 
        if (state->point_size_per_vertex) {
                psize_min = util_get_min_point_size(state);
@@ -495,10 +516,9 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
        if (rctx->b.chip_class >= R700) {
                sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
                                S_028A4C_R700_ZMM_LINE_OFFSET(1) |
-                               S_028A4C_R700_VPORT_SCISSOR_ENABLE(state->scissor);
+                               S_028A4C_R700_VPORT_SCISSOR_ENABLE(1);
        } else {
                sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
-               rs->scissor_enable = state->scissor;
        }
 
        spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
@@ -535,9 +555,9 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
                                 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) |
                                 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) |
                                 S_028814_FACE(!state->front_ccw) |
-                                S_028814_POLY_OFFSET_FRONT_ENABLE(state->offset_tri) |
-                                S_028814_POLY_OFFSET_BACK_ENABLE(state->offset_tri) |
-                                S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_tri) |
+                                S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
+                                S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
+                                S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
                                 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
                                                                         state->fill_back != PIPE_POLYGON_MODE_FILL) |
                                 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
@@ -552,13 +572,26 @@ static void *r600_create_rs_state(struct pipe_context *ctx,
        return rs;
 }
 
+static unsigned r600_tex_filter(unsigned filter, unsigned max_aniso)
+{
+       if (filter == PIPE_TEX_FILTER_LINEAR)
+               return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_BILINEAR
+                                    : V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
+       else
+               return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_POINT
+                                    : V_03C000_SQ_TEX_XY_FILTER_POINT;
+}
+
 static void *r600_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
 {
+       struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
        struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
-       unsigned aniso_flag_offset = state->max_anisotropy > 1 ? 4 : 0;
+       unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
+                                                      : state->max_anisotropy;
+       unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
 
-       if (ss == NULL) {
+       if (!ss) {
                return NULL;
        }
 
@@ -570,10 +603,10 @@ static void *r600_create_sampler_state(struct pipe_context *ctx,
                S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
                S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
                S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
-               S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter) | aniso_flag_offset) |
-               S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter) | aniso_flag_offset) |
+               S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter, max_aniso)) |
+               S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter, max_aniso)) |
                S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
-               S_03C000_MAX_ANISO(r600_tex_aniso_filter(state->max_anisotropy)) |
+               S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
                S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
                S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
        /* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
@@ -598,8 +631,8 @@ texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
        struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
        int stride = util_format_get_blocksize(view->base.format);
        unsigned format, num_format, format_comp, endian;
-       uint64_t offset = view->base.u.buf.first_element * stride;
-       unsigned size = (view->base.u.buf.last_element - view->base.u.buf.first_element + 1) * stride;
+       uint64_t offset = view->base.u.buf.offset;
+       unsigned size = view->base.u.buf.size;
 
        r600_vertex_data_type(view->base.format,
                              &format, &num_format, &format_comp,
@@ -640,8 +673,9 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
        uint32_t word4 = 0, yuv_format = 0, pitch = 0;
        unsigned char swizzle[4], array_mode = 0;
        unsigned width, height, depth, offset_level, last_level;
+       bool do_endian_swap = FALSE;
 
-       if (view == NULL)
+       if (!view)
                return NULL;
 
        /* initialize base object */
@@ -660,16 +694,25 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
        swizzle[2] = state->swizzle_b;
        swizzle[3] = state->swizzle_a;
 
+       if (R600_BIG_ENDIAN)
+               do_endian_swap = !tmp->db_compatible;
+
        format = r600_translate_texformat(ctx->screen, state->format,
                                          swizzle,
-                                         &word4, &yuv_format);
+                                         &word4, &yuv_format, do_endian_swap);
        assert(format != ~0);
        if (format == ~0) {
                FREE(view);
                return NULL;
        }
 
-       if (tmp->is_depth && !tmp->is_flushing_texture && !r600_can_read_depth(tmp)) {
+       if (state->format == PIPE_FORMAT_X24S8_UINT ||
+           state->format == PIPE_FORMAT_S8X24_UINT ||
+           state->format == PIPE_FORMAT_X32_S8X24_UINT ||
+           state->format == PIPE_FORMAT_S8_UINT)
+               view->is_stencil_sampler = true;
+
+       if (tmp->is_depth && !r600_can_sample_zs(tmp, view->is_stencil_sampler)) {
                if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) {
                        FREE(view);
                        return NULL;
@@ -677,7 +720,7 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
                tmp = tmp->flushed_depth_texture;
        }
 
-       endian = r600_colorformat_endian_swap(format);
+       endian = r600_colorformat_endian_swap(format, do_endian_swap);
 
        offset_level = state->u.tex.first_level;
        last_level = state->u.tex.last_level - offset_level;
@@ -693,7 +736,9 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
                depth = texture->array_size;
        } else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
                depth = texture->array_size / 6;
+
        switch (tmp->surface.level[offset_level].mode) {
+       default:
        case RADEON_SURF_MODE_LINEAR_ALIGNED:
                array_mode = V_038000_ARRAY_LINEAR_ALIGNED;
                break;
@@ -703,10 +748,6 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
        case RADEON_SURF_MODE_2D:
                array_mode = V_038000_ARRAY_2D_TILED_THIN1;
                break;
-       case RADEON_SURF_MODE_LINEAR:
-       default:
-               array_mode = V_038000_ARRAY_LINEAR_GENERAL;
-               break;
        }
 
        view->tex_resource = &tmp->resource;
@@ -753,10 +794,10 @@ r600_create_sampler_view(struct pipe_context *ctx,
 
 static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct pipe_clip_state *state = &rctx->clip_state.state;
 
-       r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
+       radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
        radeon_emit_array(cs, (unsigned*)state, 6*4);
 }
 
@@ -765,46 +806,6 @@ static void r600_set_polygon_stipple(struct pipe_context *ctx,
 {
 }
 
-static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom)
-{
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
-       struct r600_scissor_state *rstate = (struct r600_scissor_state *)atom;
-       struct pipe_scissor_state *state = &rstate->scissor;
-       unsigned offset = rstate->idx * 4 * 2;
-
-       if (rctx->b.chip_class != R600 || rctx->scissor[0].enable) {
-               r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
-               radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) |
-                                    S_028240_WINDOW_OFFSET_DISABLE(1));
-               radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy));
-       } else {
-               r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
-               radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
-                                    S_028240_WINDOW_OFFSET_DISABLE(1));
-               radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192));
-       }
-}
-
-static void r600_set_scissor_states(struct pipe_context *ctx,
-                                    unsigned start_slot,
-                                    unsigned num_scissors,
-                                    const struct pipe_scissor_state *state)
-{
-       struct r600_context *rctx = (struct r600_context *)ctx;
-       int i;
-
-       for (i = start_slot ; i < start_slot + num_scissors; i++) {
-               rctx->scissor[i].scissor = state[i - start_slot];
-       }
-
-       if (rctx->b.chip_class == R600 && !rctx->scissor[0].enable)
-               return;
-
-       for (i = start_slot ; i < start_slot + num_scissors; i++) {
-               rctx->scissor[i].atom.dirty = true;
-       }
-}
-
 static struct r600_resource *r600_buffer_create_helper(struct r600_screen *rscreen,
                                                       unsigned size, unsigned alignment)
 {
@@ -839,23 +840,17 @@ static void r600_init_color_surface(struct r600_context *rctx,
        unsigned offset;
        const struct util_format_description *desc;
        int i;
-       bool blend_bypass = 0, blend_clamp = 1;
+       bool blend_bypass = 0, blend_clamp = 1, do_endian_swap = FALSE;
 
-       if (rtex->is_depth && !rtex->is_flushing_texture && !r600_can_read_depth(rtex)) {
+       if (rtex->db_compatible && !r600_can_sample_zs(rtex, false)) {
                r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
                rtex = rtex->flushed_depth_texture;
                assert(rtex);
        }
 
        offset = rtex->surface.level[level].offset;
-       if (rtex->surface.level[level].mode == RADEON_SURF_MODE_LINEAR) {
-               assert(surf->base.u.tex.first_layer == surf->base.u.tex.last_layer);
-               offset += rtex->surface.level[level].slice_size *
-                       surf->base.u.tex.first_layer;
-               color_view = 0;
-       } else
-               color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
-                            S_028080_SLICE_MAX(surf->base.u.tex.last_layer);
+       color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
+                    S_028080_SLICE_MAX(surf->base.u.tex.last_layer);
 
        pitch = rtex->surface.level[level].nblk_x / 8 - 1;
        slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
@@ -864,6 +859,7 @@ static void r600_init_color_surface(struct r600_context *rctx,
        }
        color_info = 0;
        switch (rtex->surface.level[level].mode) {
+       default:
        case RADEON_SURF_MODE_LINEAR_ALIGNED:
                color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED);
                break;
@@ -873,10 +869,6 @@ static void r600_init_color_surface(struct r600_context *rctx,
        case RADEON_SURF_MODE_2D:
                color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                break;
-       case RADEON_SURF_MODE_LINEAR:
-       default:
-               color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_GENERAL);
-               break;
        }
 
        desc = util_format_description(surf->base.format);
@@ -902,17 +894,17 @@ static void r600_init_color_surface(struct r600_context *rctx,
                        ntype = V_0280A0_NUMBER_UINT;
        }
 
-       format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format);
+       if (R600_BIG_ENDIAN)
+               do_endian_swap = !rtex->db_compatible;
+
+       format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format,
+                                                     do_endian_swap);
        assert(format != ~0);
 
-       swap = r600_translate_colorswap(surf->base.format);
+       swap = r600_translate_colorswap(surf->base.format, do_endian_swap);
        assert(swap != ~0);
 
-       if (rtex->resource.b.b.usage == PIPE_USAGE_STAGING) {
-               endian = ENDIAN_NONE;
-       } else {
-               endian = r600_colorformat_endian_swap(format);
-       }
+       endian = r600_colorformat_endian_swap(format, do_endian_swap);
 
        /* set blend bypass according to docs if SINT/UINT or
           8/24 COLOR variants */
@@ -975,10 +967,8 @@ static void r600_init_color_surface(struct r600_context *rctx,
        surf->cb_color_cmask = surf->cb_color_base;
        surf->cb_color_mask = 0;
 
-       pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
-                               &rtex->resource.b.b);
-       pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
-                               &rtex->resource.b.b);
+       r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource);
+       r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource);
 
        if (rtex->cmask.size) {
                surf->cb_color_cmask = rtex->cmask.offset >> 8;
@@ -1006,12 +996,12 @@ static void r600_init_color_surface(struct r600_context *rctx,
 
                /* CMASK. */
                if (!rctx->dummy_cmask ||
-                   rctx->dummy_cmask->buf->size < cmask.size ||
+                   rctx->dummy_cmask->b.b.width0 < cmask.size ||
                    rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) {
                        struct pipe_transfer *transfer;
                        void *ptr;
 
-                       pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
+                       r600_resource_reference(&rctx->dummy_cmask, NULL);
                        rctx->dummy_cmask = r600_buffer_create_helper(rscreen, cmask.size, cmask.alignment);
 
                        /* Set the contents to 0xCC. */
@@ -1019,19 +1009,17 @@ static void r600_init_color_surface(struct r600_context *rctx,
                        memset(ptr, 0xCC, cmask.size);
                        pipe_buffer_unmap(&rctx->b.b, transfer);
                }
-               pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
-                                       &rctx->dummy_cmask->b.b);
+               r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask);
 
                /* FMASK. */
                if (!rctx->dummy_fmask ||
-                   rctx->dummy_fmask->buf->size < fmask.size ||
+                   rctx->dummy_fmask->b.b.width0 < fmask.size ||
                    rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
-                       pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
+                       r600_resource_reference(&rctx->dummy_fmask, NULL);
                        rctx->dummy_fmask = r600_buffer_create_helper(rscreen, fmask.size, fmask.alignment);
 
                }
-               pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
-                                       &rctx->dummy_fmask->b.b);
+               r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask);
 
                /* Init the registers. */
                color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
@@ -1065,7 +1053,6 @@ static void r600_init_depth_surface(struct r600_context *rctx,
                break;
        case RADEON_SURF_MODE_1D:
        case RADEON_SURF_MODE_LINEAR_ALIGNED:
-       case RADEON_SURF_MODE_LINEAR:
        default:
                array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
                break;
@@ -1081,25 +1068,6 @@ static void r600_init_depth_surface(struct r600_context *rctx,
        surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice);
        surf->db_prefetch_limit = (rtex->surface.level[level].nblk_y / 8) - 1;
 
-       switch (surf->base.format) {
-       case PIPE_FORMAT_Z24X8_UNORM:
-       case PIPE_FORMAT_Z24_UNORM_S8_UINT:
-               surf->pa_su_poly_offset_db_fmt_cntl =
-                       S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
-               break;
-       case PIPE_FORMAT_Z32_FLOAT:
-       case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
-               surf->pa_su_poly_offset_db_fmt_cntl =
-                       S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
-                       S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
-               break;
-       case PIPE_FORMAT_Z16_UNORM:
-               surf->pa_su_poly_offset_db_fmt_cntl =
-                       S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
-               break;
-       default:;
-       }
-
        /* use htile only for first level */
        if (rtex->htile_buffer && !level) {
                surf->db_htile_data_base = 0;
@@ -1121,20 +1089,17 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx,
        struct r600_texture *rtex;
        unsigned i;
 
-       if (rctx->framebuffer.state.nr_cbufs) {
-               rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
-               rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
-                                R600_CONTEXT_FLUSH_AND_INV_CB_META;
-       }
-       if (rctx->framebuffer.state.zsbuf) {
-               rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
-               rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB;
-
-               rtex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
-               if (rctx->b.chip_class >= R700 && rtex->htile_buffer) {
-                       rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META;
-               }
-       }
+       /* Flush TC when changing the framebuffer state, because the only
+        * client not using TC that can change textures is the framebuffer.
+        * Other places don't typically have to flush TC.
+        */
+       rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
+                        R600_CONTEXT_FLUSH_AND_INV |
+                        R600_CONTEXT_FLUSH_AND_INV_CB |
+                        R600_CONTEXT_FLUSH_AND_INV_CB_META |
+                        R600_CONTEXT_FLUSH_AND_INV_DB |
+                        R600_CONTEXT_FLUSH_AND_INV_DB_META |
+                        R600_CONTEXT_INV_TEX_CACHE;
 
        /* Set the new state. */
        util_copy_framebuffer_state(&rctx->framebuffer.state, state);
@@ -1192,7 +1157,7 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx,
 
                if (rctx->alphatest_state.bypass != alphatest_bypass) {
                        rctx->alphatest_state.bypass = alphatest_bypass;
-                       rctx->alphatest_state.atom.dirty = true;
+                       r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
                }
        }
 
@@ -1208,28 +1173,28 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx,
 
                if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
                        rctx->poly_offset_state.zs_format = state->zsbuf->format;
-                       rctx->poly_offset_state.atom.dirty = true;
+                       r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
                }
 
                if (rctx->db_state.rsurf != surf) {
                        rctx->db_state.rsurf = surf;
-                       rctx->db_state.atom.dirty = true;
-                       rctx->db_misc_state.atom.dirty = true;
+                       r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
+                       r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
                }
        } else if (rctx->db_state.rsurf) {
                rctx->db_state.rsurf = NULL;
-               rctx->db_state.atom.dirty = true;
-               rctx->db_misc_state.atom.dirty = true;
+               r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
+               r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
        }
 
        if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
                rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
-               rctx->cb_misc_state.atom.dirty = true;
+               r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
        }
 
        if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
                rctx->alphatest_state.bypass = false;
-               rctx->alphatest_state.atom.dirty = true;
+               r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
        }
 
        /* Calculate the CS size. */
@@ -1249,7 +1214,7 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx,
                rctx->framebuffer.atom.num_dw += 2;
        }
 
-       rctx->framebuffer.atom.dirty = true;
+       r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
 
        r600_set_sample_locations_constant_buffer(rctx);
 }
@@ -1312,7 +1277,7 @@ static void r600_get_sample_position(struct pipe_context *ctx,
 
 static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        unsigned max_dist = 0;
 
        if (rctx->b.family == CHIP_R600) {
@@ -1321,15 +1286,15 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
                        nr_samples = 0;
                        break;
                case 2:
-                       r600_write_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
+                       radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
                        max_dist = max_dist_2x;
                        break;
                case 4:
-                       r600_write_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
+                       radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
                        max_dist = max_dist_4x;
                        break;
                case 8:
-                       r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
+                       radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
                        radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
                        radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
                        max_dist = max_dist_8x;
@@ -1338,25 +1303,25 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
        } else {
                switch (nr_samples) {
                default:
-                       r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+                       radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
                        radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
                        radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
                        nr_samples = 0;
                        break;
                case 2:
-                       r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+                       radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
                        radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
                        radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
                        max_dist = max_dist_2x;
                        break;
                case 4:
-                       r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+                       radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
                        radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
                        radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
                        max_dist = max_dist_4x;
                        break;
                case 8:
-                       r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+                       radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
                        radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
                        radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
                        max_dist = max_dist_8x;
@@ -1365,13 +1330,13 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
        }
 
        if (nr_samples > 1) {
-               r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+               radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
                radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
                                     S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
                radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
                                     S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
        } else {
-               r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+               radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
                radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
                radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
        }
@@ -1379,14 +1344,14 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
 
 static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
        unsigned nr_cbufs = state->nr_cbufs;
        struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
        unsigned i, sbu = 0;
 
        /* Colorbuffers. */
-       r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
+       radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
        for (i = 0; i < nr_cbufs; i++) {
                radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
        }
@@ -1407,10 +1372,10 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
                                continue;
 
                        /* COLOR_BASE */
-                       r600_write_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
+                       radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
 
-                       reloc = r600_context_bo_reloc(&rctx->b,
-                                                     &rctx->b.rings.gfx,
+                       reloc = radeon_add_to_buffer_list(&rctx->b,
+                                                     &rctx->b.gfx,
                                                      (struct r600_resource*)cb[i]->base.texture,
                                                      RADEON_USAGE_READWRITE,
                                                      cb[i]->base.texture->nr_samples > 1 ?
@@ -1420,10 +1385,10 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
                        radeon_emit(cs, reloc);
 
                        /* FMASK */
-                       r600_write_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
+                       radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
 
-                       reloc = r600_context_bo_reloc(&rctx->b,
-                                                     &rctx->b.rings.gfx,
+                       reloc = radeon_add_to_buffer_list(&rctx->b,
+                                                     &rctx->b.gfx,
                                                      cb[i]->cb_buffer_fmask,
                                                      RADEON_USAGE_READWRITE,
                                                      cb[i]->base.texture->nr_samples > 1 ?
@@ -1433,10 +1398,10 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
                        radeon_emit(cs, reloc);
 
                        /* CMASK */
-                       r600_write_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
+                       radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
 
-                       reloc = r600_context_bo_reloc(&rctx->b,
-                                                     &rctx->b.rings.gfx,
+                       reloc = radeon_add_to_buffer_list(&rctx->b,
+                                                     &rctx->b.gfx,
                                                      cb[i]->cb_buffer_cmask,
                                                      RADEON_USAGE_READWRITE,
                                                      cb[i]->base.texture->nr_samples > 1 ?
@@ -1446,17 +1411,17 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
                        radeon_emit(cs, reloc);
                }
 
-               r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
+               radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
                for (i = 0; i < nr_cbufs; i++) {
                        radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
                }
 
-               r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
+               radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
                for (i = 0; i < nr_cbufs; i++) {
                        radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
                }
 
-               r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
+               radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
                for (i = 0; i < nr_cbufs; i++) {
                        radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
                }
@@ -1474,34 +1439,31 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
        /* Zbuffer. */
        if (state->zsbuf) {
                struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
-               unsigned reloc = r600_context_bo_reloc(&rctx->b,
-                                                      &rctx->b.rings.gfx,
+               unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
+                                                      &rctx->b.gfx,
                                                       (struct r600_resource*)state->zsbuf->texture,
                                                       RADEON_USAGE_READWRITE,
                                                       surf->base.texture->nr_samples > 1 ?
                                                               RADEON_PRIO_DEPTH_BUFFER_MSAA :
                                                               RADEON_PRIO_DEPTH_BUFFER);
 
-               r600_write_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
-                                      surf->pa_su_poly_offset_db_fmt_cntl);
-
-               r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
+               radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
                radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
                radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
-               r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
+               radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
                radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
                radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, reloc);
 
-               r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
+               radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
 
                sbu |= SURFACE_BASE_UPDATE_DEPTH;
        } else if (rctx->screen->b.info.drm_minor >= 18) {
                /* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
                 * Older kernels are out of luck. */
-               r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
+               radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
        }
 
        /* SURFACE_BASE_UPDATE */
@@ -1512,19 +1474,19 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
        }
 
        /* Framebuffer dimensions. */
-       r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
+       radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
        radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
                             S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
        radeon_emit(cs, S_028244_BR_X(state->width) |
                             S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
 
        if (rctx->framebuffer.is_msaa_resolve) {
-               r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
+               radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
        } else {
                /* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
                 * will assure that the alpha-test will work even if there is
                 * no colorbuffer bound. */
-               r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
+               radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
                                       (1ull << MAX2(nr_cbufs, 1)) - 1);
        }
 
@@ -1540,19 +1502,19 @@ static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
 
        rctx->ps_iter_samples = min_samples;
        if (rctx->framebuffer.nr_samples > 1) {
-               rctx->rasterizer_state.atom.dirty = true;
+               r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom);
                if (rctx->b.chip_class == R600)
-                       rctx->db_misc_state.atom.dirty = true;
+                       r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
        }
 }
 
 static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
 
        if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
-               r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
+               radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
                if (rctx->b.chip_class == R600) {
                        radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
                        radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
@@ -1560,17 +1522,17 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom
                        radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
                        radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
                }
-               r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
+               radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
        } else {
                unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
                unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
                unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;
 
-               r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
+               radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
                radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
                /* Always enable the first color output to make sure alpha-test works even without one. */
                radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
-               r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL,
+               radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL,
                                       a->cb_color_control |
                                       S_028808_MULTIWRITE_ENABLE(multiwrite));
        }
@@ -1578,40 +1540,59 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom
 
 static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_db_state *a = (struct r600_db_state*)atom;
 
        if (a->rsurf && a->rsurf->db_htile_surface) {
                struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
                unsigned reloc_idx;
 
-               r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
-               r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
-               r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
-               reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
-                                                 RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META);
-               cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
-               cs->buf[cs->cdw++] = reloc_idx;
+               radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
+               radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
+               radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
+               reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
+                                                 RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
+               radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+               radeon_emit(cs, reloc_idx);
        } else {
-               r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
+               radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
        }
 }
 
 static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
        unsigned db_render_control = 0;
        unsigned db_render_override =
                S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
                S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);
 
-       if (a->occlusion_query_enabled) {
+       if (rctx->b.chip_class >= R700) {
+               switch (a->ps_conservative_z) {
+               default: /* fall through */
+               case TGSI_FS_DEPTH_LAYOUT_ANY:
+                       db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_ANY_Z);
+                       break;
+               case TGSI_FS_DEPTH_LAYOUT_GREATER:
+                       db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_GREATER_THAN_Z);
+                       break;
+               case TGSI_FS_DEPTH_LAYOUT_LESS:
+                       db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_LESS_THAN_Z);
+                       break;
+               }
+       }
+
+       if (rctx->b.num_occlusion_queries > 0 &&
+           !a->occlusion_queries_disabled) {
                if (rctx->b.chip_class >= R700) {
                        db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
                }
                db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
+       } else {
+               db_render_control |= S_028D0C_ZPASS_INCREMENT_DISABLE(1);
        }
+
        if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) {
                /* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
                db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF);
@@ -1643,9 +1624,9 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom
                if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
                    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
                        db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
-       } else if (a->flush_depthstencil_in_place) {
-               db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(1) |
-                                    S_028D0C_STENCIL_COMPRESS_DISABLE(1);
+       } else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
+               db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
+                                    S_028D0C_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
                db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
        }
        if (a->htile_clear) {
@@ -1657,24 +1638,24 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom
                db_render_override |= S_028D10_MAX_TILES_IN_DTT(6);
        }
 
-       r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
+       radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
        radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
        radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
-       r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
+       radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
 }
 
 static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_config_state *a = (struct r600_config_state*)atom;
 
-       r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
-       r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
+       radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
+       radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
 }
 
 static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;
 
        while (dirty_mask) {
@@ -1689,11 +1670,11 @@ static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom
 
                offset = vb->buffer_offset;
 
-               /* fetch resources start at index 320 */
+               /* fetch resources start at index 320 (OFFSET_FS) */
                radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
-               radeon_emit(cs, (320 + buffer_index) * 7);
+               radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7);
                radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
-               radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
+               radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
                radeon_emit(cs, /* RESOURCEi_WORD2 */
                                 S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
                                 S_038008_STRIDE(vb->stride));
@@ -1703,8 +1684,8 @@ static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom
                radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
-                                                     RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO));
+               radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+                                                     RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
        }
 }
 
@@ -1714,7 +1695,7 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
                                       unsigned reg_alu_constbuf_size,
                                       unsigned reg_alu_const_cache)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        uint32_t dirty_mask = state->dirty_mask;
 
        while (dirty_mask) {
@@ -1730,19 +1711,19 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
                offset = cb->buffer_offset;
 
                if (!gs_ring_buffer) {
-                       r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
-                                              ALIGN_DIVUP(cb->buffer_size >> 4, 16));
-                       r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+                       radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
+                                              DIV_ROUND_UP(cb->buffer_size, 256));
+                       radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
                }
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
-                                                     RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO));
+               radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+                                                     RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
 
                radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
                radeon_emit(cs, (buffer_id_base + buffer_index) * 7);
                radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
-               radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
+               radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
                radeon_emit(cs, /* RESOURCEi_WORD2 */
                            S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
                            S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
@@ -1752,8 +1733,8 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
                radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
-                                                     RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO));
+               radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+                                                     RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
 
                dirty_mask &= ~(1 << buffer_index);
        }
@@ -1762,21 +1743,24 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
 
 static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX], 160,
+       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
+                                  R600_FETCH_CONSTANTS_OFFSET_VS,
                                   R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
                                   R_028980_ALU_CONST_CACHE_VS_0);
 }
 
 static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY], 336,
+       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
+                                  R600_FETCH_CONSTANTS_OFFSET_GS,
                                   R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
                                   R_0289C0_ALU_CONST_CACHE_GS_0);
 }
 
 static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT], 0,
+       r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
+                                  R600_FETCH_CONSTANTS_OFFSET_PS,
                                   R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
                                   R_028940_ALU_CONST_CACHE_PS_0);
 }
@@ -1785,7 +1769,7 @@ static void r600_emit_sampler_views(struct r600_context *rctx,
                                    struct r600_samplerview_state *state,
                                    unsigned resource_id_base)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        uint32_t dirty_mask = state->dirty_mask;
 
        while (dirty_mask) {
@@ -1800,11 +1784,9 @@ static void r600_emit_sampler_views(struct r600_context *rctx,
                radeon_emit(cs, (resource_id_base + resource_index) * 7);
                radeon_emit_array(cs, rview->tex_resource_words, 7);
 
-               reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource,
+               reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
                                              RADEON_USAGE_READ,
-                                             rview->tex_resource->b.b.nr_samples > 1 ?
-                                                     RADEON_PRIO_SHADER_TEXTURE_MSAA :
-                                                     RADEON_PRIO_SHADER_TEXTURE_RO);
+                                             r600_get_sampler_view_priority(rview->tex_resource));
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, reloc);
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
@@ -1813,26 +1795,20 @@ static void r600_emit_sampler_views(struct r600_context *rctx,
        state->dirty_mask = 0;
 }
 
-/* Resource IDs:
- *   PS: 0   .. +160
- *   VS: 160 .. +160
- *   FS: 320 .. +16
- *   GS: 336 .. +160
- */
 
 static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, 160 + R600_MAX_CONST_BUFFERS);
+       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS);
 }
 
 static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, 336 + R600_MAX_CONST_BUFFERS);
+       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS);
 }
 
 static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
 {
-       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_MAX_CONST_BUFFERS);
+       r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS);
 }
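
The hunks above drop the hard-coded fetch-constant bases (0 for PS, 160 for VS, 336 for GS) in favour of the R600_FETCH_CONSTANTS_OFFSET_* macros, and each stage's sampler views start R600_MAX_CONST_BUFFERS slots after its constant buffers. A minimal sketch of that slot layout; the stage offsets are taken from the literals the patch replaces, and the 16-buffer count is an assumption used here purely for illustration:

#include <stdio.h>

/* illustrative stand-ins for the driver's macros: the stage offsets mirror
 * the old literals (PS = 0, VS = 160, GS = 336); the const-buffer count is
 * an assumption for this sketch only */
#define SKETCH_OFFSET_PS         0
#define SKETCH_OFFSET_VS         160
#define SKETCH_OFFSET_GS         336
#define SKETCH_MAX_CONST_BUFFERS 16

/* constant buffers occupy the first slots of a stage's fetch-resource range */
static unsigned const_buffer_slot(unsigned stage_offset, unsigned index)
{
        return stage_offset + index;
}

/* sampler views follow the constant buffers within the same range */
static unsigned sampler_view_slot(unsigned stage_offset, unsigned index)
{
        return stage_offset + SKETCH_MAX_CONST_BUFFERS + index;
}

int main(void)
{
        printf("GS const buffer 2 -> slot %u\n",
               const_buffer_slot(SKETCH_OFFSET_GS, 2));
        printf("VS sampler view 0 -> slot %u\n",
               sampler_view_slot(SKETCH_OFFSET_VS, 0));
        return 0;
}
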
 
 static void r600_emit_sampler_states(struct r600_context *rctx,
@@ -1840,7 +1816,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx,
                                unsigned resource_id_base,
                                unsigned border_color_reg)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        uint32_t dirty_mask = texinfo->states.dirty_mask;
 
        while (dirty_mask) {
@@ -1877,7 +1853,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx,
 
                        offset = border_color_reg;
                        offset += i * 16;
-                       r600_write_config_reg_seq(cs, offset, 4);
+                       radeon_set_config_reg_seq(cs, offset, 4);
                        radeon_emit_array(cs, rstate->border_color.ui, 4);
                }
        }
@@ -1901,7 +1877,7 @@ static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_a
 
 static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        unsigned tmp;
 
        tmp = S_009508_DISABLE_CUBE_ANISO(1) |
@@ -1911,7 +1887,7 @@ static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_a
        if (!rctx->seamless_cube_map.enabled) {
                tmp |= S_009508_DISABLE_CUBE_WRAP(1);
        }
-       r600_write_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
+       radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
 }
 
 static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
@@ -1919,37 +1895,43 @@ static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a
        struct r600_sample_mask *s = (struct r600_sample_mask*)a;
        uint8_t mask = s->sample_mask;
 
-       r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
+       radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK,
                               mask | (mask << 8) | (mask << 16) | (mask << 24));
 }
 
 static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_cso_state *state = (struct r600_cso_state*)a;
        struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
 
-       r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
+       radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-       radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
-                                             RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA));
+       radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
+                                                  RADEON_USAGE_READ,
+                                                  RADEON_PRIO_INTERNAL_SHADER));
 }
 
 static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
 
        uint32_t v2 = 0, primid = 0;
 
+       if (rctx->vs_shader->current->shader.vs_as_gs_a) {
+               v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
+               primid = 1;
+       }
+
        if (state->geom_enable) {
                uint32_t cut_val;
 
-               if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 128)
+               if (rctx->gs_shader->gs_max_out_vertices <= 128)
                        cut_val = V_028A40_GS_CUT_128;
-               else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 256)
+               else if (rctx->gs_shader->gs_max_out_vertices <= 256)
                        cut_val = V_028A40_GS_CUT_256;
-               else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 512)
+               else if (rctx->gs_shader->gs_max_out_vertices <= 512)
                        cut_val = V_028A40_GS_CUT_512;
                else
                        cut_val = V_028A40_GS_CUT_1024;
@@ -1961,44 +1943,44 @@ static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom
                        primid = 1;
        }
 
-       r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
-       r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
+       radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
+       radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
 }
 
 static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
        struct r600_resource *rbuffer;
 
-       r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+       radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
 
        if (state->enable) {
                rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
-               r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
+               radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+               radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
                                                      RADEON_USAGE_READWRITE,
-                                                     RADEON_PRIO_SHADER_RESOURCE_RW));
-               r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
+                                                     RADEON_PRIO_SHADER_RINGS));
+               radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
                                state->esgs_ring.buffer_size >> 8);
 
                rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
-               r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
+               radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
-               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+               radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
                                                      RADEON_USAGE_READWRITE,
-                                                     RADEON_PRIO_SHADER_RESOURCE_RW));
-               r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
+                                                     RADEON_PRIO_SHADER_RINGS));
+               radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
                                state->gsvs_ring.buffer_size >> 8);
        } else {
-               r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
-               r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
+               radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
+               radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
        }
 
-       r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+       radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
 }
@@ -2006,57 +1988,62 @@ static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
 /* Adjust GPR allocation on R6xx/R7xx */
 bool r600_adjust_gprs(struct r600_context *rctx)
 {
-       unsigned num_ps_gprs = rctx->ps_shader->current->shader.bc.ngpr;
-       unsigned num_vs_gprs, num_es_gprs, num_gs_gprs;
-       unsigned new_num_ps_gprs = num_ps_gprs;
-       unsigned new_num_vs_gprs, new_num_es_gprs, new_num_gs_gprs;
-       unsigned cur_num_ps_gprs = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
-       unsigned cur_num_vs_gprs = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
-       unsigned cur_num_gs_gprs = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
-       unsigned cur_num_es_gprs = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
-       unsigned def_num_ps_gprs = rctx->default_ps_gprs;
-       unsigned def_num_vs_gprs = rctx->default_vs_gprs;
-       unsigned def_num_gs_gprs = 0;
-       unsigned def_num_es_gprs = 0;
+       unsigned num_gprs[R600_NUM_HW_STAGES];
+       unsigned new_gprs[R600_NUM_HW_STAGES];
+       unsigned cur_gprs[R600_NUM_HW_STAGES];
+       unsigned def_gprs[R600_NUM_HW_STAGES];
        unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
-       /* hardware will reserve twice num_clause_temp_gprs */
-       unsigned max_gprs = def_num_gs_gprs + def_num_es_gprs + def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
+       unsigned max_gprs;
        unsigned tmp, tmp2;
+       unsigned i;
+       bool need_recalc = false, use_default = true;
 
+       /* hardware will reserve twice num_clause_temp_gprs */
+       max_gprs = def_num_clause_temp_gprs * 2;
+       for (i = 0; i < R600_NUM_HW_STAGES; i++) {
+               def_gprs[i] = rctx->default_gprs[i];
+               max_gprs += def_gprs[i];
+       }
+
+       cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
+       cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
+       cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
+       cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
+
+       num_gprs[R600_HW_STAGE_PS] = rctx->ps_shader->current->shader.bc.ngpr;
        if (rctx->gs_shader) {
-               num_es_gprs = rctx->vs_shader->current->shader.bc.ngpr;
-               num_gs_gprs = rctx->gs_shader->current->shader.bc.ngpr;
-               num_vs_gprs = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
+               num_gprs[R600_HW_STAGE_ES] = rctx->vs_shader->current->shader.bc.ngpr;
+               num_gprs[R600_HW_STAGE_GS] = rctx->gs_shader->current->shader.bc.ngpr;
+               num_gprs[R600_HW_STAGE_VS] = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
        } else {
-               num_es_gprs = 0;
-               num_gs_gprs = 0;
-               num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+               num_gprs[R600_HW_STAGE_ES] = 0;
+               num_gprs[R600_HW_STAGE_GS] = 0;
+               num_gprs[R600_HW_STAGE_VS] = rctx->vs_shader->current->shader.bc.ngpr;
+       }
+
+       for (i = 0; i < R600_NUM_HW_STAGES; i++) {
+               new_gprs[i] = num_gprs[i];
+               if (new_gprs[i] > cur_gprs[i])
+                       need_recalc = true;
+               if (new_gprs[i] > def_gprs[i])
+                       use_default = false;
        }
-       new_num_vs_gprs = num_vs_gprs;
-       new_num_es_gprs = num_es_gprs;
-       new_num_gs_gprs = num_gs_gprs;
 
        /* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must be <= max_gprs */
-       if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs ||
-           new_num_es_gprs > cur_num_es_gprs || new_num_gs_gprs > cur_num_gs_gprs) {
-               /* try to use switch back to default */
-               if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs ||
-                   new_num_gs_gprs > def_num_gs_gprs || new_num_es_gprs > def_num_es_gprs) {
-                       /* always privilege vs stage so that at worst we have the
-                        * pixel stage producing wrong output (not the vertex
-                        * stage) */
-                       new_num_ps_gprs = max_gprs - ((new_num_vs_gprs - new_num_es_gprs - new_num_gs_gprs) + def_num_clause_temp_gprs * 2);
-                       new_num_vs_gprs = num_vs_gprs;
-                       new_num_gs_gprs = num_gs_gprs;
-                       new_num_es_gprs = num_es_gprs;
-               } else {
-                       new_num_ps_gprs = def_num_ps_gprs;
-                       new_num_vs_gprs = def_num_vs_gprs;
-                       new_num_es_gprs = def_num_es_gprs;
-                       new_num_gs_gprs = def_num_gs_gprs;
-               }
-       } else {
+       if (!need_recalc)
                return true;
+
+       /* try to switch back to the defaults */
+       if (!use_default) {
+       /* always privilege the VS stage so that at worst we have the
+        * pixel stage producing wrong output (not the vertex
+        * stage) */
+               new_gprs[R600_HW_STAGE_PS] = max_gprs - def_num_clause_temp_gprs * 2;
+               for (i = R600_HW_STAGE_VS; i < R600_NUM_HW_STAGES; i++)
+                       new_gprs[R600_HW_STAGE_PS] -= new_gprs[i];
+       } else {
+               for (i = 0; i < R600_NUM_HW_STAGES; i++)
+                       new_gprs[i] = def_gprs[i];
        }
 
        /* SQ_PGM_RESOURCES_*.NUM_GPRS must always be programmed to a value <=
@@ -2065,25 +2052,26 @@ bool r600_adjust_gprs(struct r600_context *rctx)
         * it will lock up. So in this case just discard the draw command
         * and don't change the current GPR partitioning.
         */
-       if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs ||
-           num_gs_gprs > new_num_gs_gprs || num_es_gprs > new_num_es_gprs) {
-               R600_ERR("shaders require too many register (%d + %d + %d + %d) "
-                        "for a combined maximum of %d\n",
-                        num_ps_gprs, num_vs_gprs, num_es_gprs, num_gs_gprs, max_gprs);
-               return false;
+       for (i = 0; i < R600_NUM_HW_STAGES; i++) {
+               if (num_gprs[i] > new_gprs[i]) {
+                       R600_ERR("shaders require too many registers (%d + %d + %d + %d) "
+                                "for a combined maximum of %d\n",
+                                num_gprs[R600_HW_STAGE_PS], num_gprs[R600_HW_STAGE_VS], num_gprs[R600_HW_STAGE_ES], num_gprs[R600_HW_STAGE_GS], max_gprs);
+                       return false;
+               }
        }
 
        /* in some cases we end up recomputing the current value */
-       tmp = S_008C04_NUM_PS_GPRS(new_num_ps_gprs) |
-               S_008C04_NUM_VS_GPRS(new_num_vs_gprs) |
+       tmp = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
+               S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
                S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
 
-       tmp2 = S_008C08_NUM_ES_GPRS(new_num_es_gprs) |
-               S_008C08_NUM_GS_GPRS(new_num_gs_gprs);
+       tmp2 = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
+               S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);
        if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
                rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
                rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
-               rctx->config_state.atom.dirty = true;
+               r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
                rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
        }
        return true;
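
As a compact illustration of the redistribution the rewritten function performs (keep the current split if it already fits, fall back to the per-family defaults when they suffice, otherwise let VS/GS/ES keep what they need and hand PS the remainder), here is a stand-alone sketch. The stage ordering and the doubled clause-temp reservation follow the code above; the numbers in main() are illustrative only:

#include <stdbool.h>
#include <stdio.h>

enum { PS, VS, GS, ES, NUM_STAGES };

static bool redistribute_gprs(const unsigned need[NUM_STAGES],
                              const unsigned cur[NUM_STAGES],
                              const unsigned def[NUM_STAGES],
                              unsigned clause_temp_gprs,
                              unsigned alloc[NUM_STAGES])
{
        unsigned max_gprs = clause_temp_gprs * 2; /* hardware reserves these twice */
        bool need_recalc = false, use_default = true;
        unsigned i;

        for (i = 0; i < NUM_STAGES; i++)
                max_gprs += def[i];

        for (i = 0; i < NUM_STAGES; i++) {
                alloc[i] = need[i];
                if (need[i] > cur[i])
                        need_recalc = true;   /* current split is too small */
                if (need[i] > def[i])
                        use_default = false;  /* defaults would be too small too */
        }

        if (!need_recalc)
                return true;                  /* keep the current split */

        if (use_default) {
                for (i = 0; i < NUM_STAGES; i++)
                        alloc[i] = def[i];
        } else {
                /* VS/GS/ES keep what they need; PS gets whatever is left */
                alloc[PS] = max_gprs - clause_temp_gprs * 2;
                for (i = VS; i < NUM_STAGES; i++)
                        alloc[PS] -= alloc[i];
        }

        for (i = 0; i < NUM_STAGES; i++)
                if (need[i] > alloc[i])
                        return false;         /* would overflow max_gprs: drop the draw */
        return true;
}

int main(void)
{
        /* illustrative numbers only */
        const unsigned need[NUM_STAGES] = { 80, 60, 0, 0 };
        const unsigned cur[NUM_STAGES]  = { 92, 52, 0, 0 };
        const unsigned def[NUM_STAGES]  = { 92, 52, 0, 0 };
        unsigned alloc[NUM_STAGES];

        if (redistribute_gprs(need, cur, def, 4, alloc))
                printf("PS %u VS %u GS %u ES %u\n",
                       alloc[PS], alloc[VS], alloc[GS], alloc[ES]);
        return 0;
}
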
@@ -2128,6 +2116,12 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
        r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
        r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 
+       /* This enables pipeline stat & streamout queries.
+        * They are only disabled by blits.
+        */
+       r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
+       r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));
+
        family = rctx->b.family;
        ps_prio = 0;
        vs_prio = 1;
@@ -2175,10 +2169,11 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
-               num_ps_threads = 136;
-               num_vs_threads = 48;
-               num_gs_threads = 4;
-               num_es_threads = 4;
+               /* limit VS to 40 threads and give ES/GS at least 16 threads each */
+               num_ps_threads = 120;
+               num_vs_threads = 40;
+               num_gs_threads = 16;
+               num_es_threads = 16;
                num_ps_stack_entries = 40;
                num_vs_stack_entries = 40;
                num_gs_stack_entries = 32;
@@ -2247,8 +2242,11 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
                break;
        }
 
-       rctx->default_ps_gprs = num_ps_gprs;
-       rctx->default_vs_gprs = num_vs_gprs;
+       rctx->default_gprs[R600_HW_STAGE_PS] = num_ps_gprs;
+       rctx->default_gprs[R600_HW_STAGE_VS] = num_vs_gprs;
+       rctx->default_gprs[R600_HW_STAGE_GS] = 0;
+       rctx->default_gprs[R600_HW_STAGE_ES] = 0;
+
        rctx->r6xx_num_clause_temp_gprs = num_temp_gprs;
 
        /* SQ_CONFIG */
@@ -2375,16 +2373,10 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
        r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
        r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0);
 
-       r600_store_context_reg_seq(cb, R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 4);
-       r600_store_value(cb, 0x3F800000); /* R_028C0C_PA_CL_GB_VERT_CLIP_ADJ */
-       r600_store_value(cb, 0x3F800000); /* R_028C10_PA_CL_GB_VERT_DISC_ADJ */
-       r600_store_value(cb, 0x3F800000); /* R_028C14_PA_CL_GB_HORZ_CLIP_ADJ */
-       r600_store_value(cb, 0x3F800000); /* R_028C18_PA_CL_GB_HORZ_DISC_ADJ */
-
-       r600_store_context_reg_seq(cb, R_0282D0_PA_SC_VPORT_ZMIN_0, 2 * 16);
-       for (tmp = 0; tmp < 16; tmp++) {
+       r600_store_context_reg_seq(cb, R_0282D0_PA_SC_VPORT_ZMIN_0, 2 * R600_MAX_VIEWPORTS);
+       for (tmp = 0; tmp < R600_MAX_VIEWPORTS; tmp++) {
                r600_store_value(cb, 0); /* R_0282D0_PA_SC_VPORT_ZMIN_0 */
-               r600_store_value(cb, 0x3F800000); /* R_0282D4_PA_SC_VPORT_ZMAX_0 */
+               r600_store_value(cb, fui(1.0)); /* R_0282D4_PA_SC_VPORT_ZMAX_0 */
        }
 
        r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
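
The 0x3F800000 literal replaced in the viewport Z-range loop above is the IEEE-754 bit pattern of 1.0f; fui() here converts a float to its raw 32-bit encoding. A stand-alone equivalent for reference:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* float -> raw IEEE-754 bits, equivalent in effect to the fui() helper used above */
static uint32_t float_bits(float f)
{
        uint32_t u;
        memcpy(&u, &f, sizeof(u)); /* well-defined way to reinterpret the bytes */
        return u;
}

int main(void)
{
        /* 1.0f: sign 0, biased exponent 127 (0x7f), zero mantissa */
        assert(float_bits(1.0f) == 0x3F800000u);
        return 0;
}
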
@@ -2469,6 +2461,10 @@ void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *sha
 
                tmp = S_028644_SEMANTIC(sid);
 
+               /* D3D9 behaviour; GL leaves this undefined */
+               if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
+                       tmp |= S_028644_DEFAULT_VAL(3);
+
                if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
                        rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
                        (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
@@ -2637,6 +2633,9 @@ void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *sha
                S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport);
 }
 
+#define RV610_GSVS_ALIGN 32
+#define R600_GSVS_ALIGN 16
+
 void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
@@ -2644,7 +2643,24 @@ void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *sha
        struct r600_shader *rshader = &shader->shader;
        struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
        unsigned gsvs_itemsize =
-                       (cp_shader->ring_item_size * rshader->gs_max_out_vertices) >> 2;
+                       (cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2;
+
+       /* some R600-family parts need the GSVS item size aligned to the
+        * cacheline size; this was fixed in RS780 and above. */
+       switch (rctx->b.family) {
+       case CHIP_RV610:
+               gsvs_itemsize = align(gsvs_itemsize, RV610_GSVS_ALIGN);
+               break;
+       case CHIP_R600:
+       case CHIP_RV630:
+       case CHIP_RV670:
+       case CHIP_RV620:
+       case CHIP_RV635:
+               gsvs_itemsize = align(gsvs_itemsize, R600_GSVS_ALIGN);
+               break;
+       default:
+               break;
+       }
 
        r600_init_command_buffer(cb, 64);
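
The alignment added above rounds the GSVS item size up to a multiple of 32 units on RV610 and 16 on the other listed pre-RS780 chips. Assuming align() is the usual power-of-two round-up, the arithmetic looks like this:

#include <assert.h>

/* power-of-two round-up, as assumed for the align() calls above */
static unsigned align_pot(unsigned value, unsigned alignment)
{
        return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
        assert(align_pot(36, 32) == 64); /* RV610 case */
        assert(align_pot(36, 16) == 48); /* R600/RV630/RV670/RV620/RV635 case */
        assert(align_pot(64, 32) == 64); /* already aligned values are unchanged */
        return 0;
}
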
 
@@ -2653,19 +2669,16 @@ void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *sha
 
        if (rctx->b.chip_class >= R700) {
                r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
-                                      S_028B38_MAX_VERT_OUT(rshader->gs_max_out_vertices));
+                                      S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
        }
        r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
-                              r600_conv_prim_to_gs_out(rshader->gs_output_prim));
+                              r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));
 
-       r600_store_context_reg_seq(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 4);
-       r600_store_value(cb, cp_shader->ring_item_size >> 2);
-       r600_store_value(cb, 0);
-       r600_store_value(cb, 0);
-       r600_store_value(cb, 0);
+       r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE,
+                              cp_shader->ring_item_sizes[0] >> 2);
 
        r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
-                              (rshader->ring_item_size) >> 2);
+                              (rshader->ring_item_sizes[0]) >> 2);
 
        r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
                               gsvs_itemsize);
@@ -2767,6 +2780,7 @@ void r600_update_db_shader_control(struct r600_context * rctx)
 {
        bool dual_export;
        unsigned db_shader_control;
+       uint8_t ps_conservative_z;
 
        if (!rctx->ps_shader) {
                return;
@@ -2778,6 +2792,8 @@ void r600_update_db_shader_control(struct r600_context * rctx)
        db_shader_control = rctx->ps_shader->current->db_shader_control |
                            S_02880C_DUAL_EXPORT_ENABLE(dual_export);
 
+       ps_conservative_z = rctx->ps_shader->current->shader.ps_conservative_z;
+
        /* When alpha test is enabled we can't trust the hw to make the proper
         * decision on the order in which ztest should be run relative to fragment
         * shader execution.
@@ -2791,22 +2807,23 @@ void r600_update_db_shader_control(struct r600_context * rctx)
                db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
        }
 
-       if (db_shader_control != rctx->db_misc_state.db_shader_control) {
+       if (db_shader_control != rctx->db_misc_state.db_shader_control ||
+               ps_conservative_z != rctx->db_misc_state.ps_conservative_z) {
                rctx->db_misc_state.db_shader_control = db_shader_control;
-               rctx->db_misc_state.atom.dirty = true;
+               rctx->db_misc_state.ps_conservative_z = ps_conservative_z;
+               r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
        }
 }
 
-static INLINE unsigned r600_array_mode(unsigned mode)
+static inline unsigned r600_array_mode(unsigned mode)
 {
        switch (mode) {
+       default:
        case RADEON_SURF_MODE_LINEAR_ALIGNED:   return V_0280A0_ARRAY_LINEAR_ALIGNED;
                break;
        case RADEON_SURF_MODE_1D:               return V_0280A0_ARRAY_1D_TILED_THIN1;
                break;
        case RADEON_SURF_MODE_2D:               return V_0280A0_ARRAY_2D_TILED_THIN1;
-       default:
-       case RADEON_SURF_MODE_LINEAR:           return V_0280A0_ARRAY_LINEAR_GENERAL;
        }
 }
 
@@ -2825,7 +2842,7 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx,
                                unsigned pitch,
                                unsigned bpp)
 {
-       struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
+       struct radeon_winsys_cs *cs = rctx->b.dma.cs;
        struct r600_texture *rsrc = (struct r600_texture*)src;
        struct r600_texture *rdst = (struct r600_texture*)dst;
        unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
@@ -2834,16 +2851,13 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx,
 
        dst_mode = rdst->surface.level[dst_level].mode;
        src_mode = rsrc->surface.level[src_level].mode;
-       /* downcast linear aligned to linear to simplify test */
-       src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
-       dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;
        assert(dst_mode != src_mode);
 
        y = 0;
        lbpp = util_logbase2(bpp);
        pitch_tile_max = ((pitch / bpp) / 8) - 1;
 
-       if (dst_mode == RADEON_SURF_MODE_LINEAR) {
+       if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
                /* T2L */
                array_mode = r600_array_mode(src_mode);
                slice_tile_max = (rsrc->surface.level[src_level].nblk_x * rsrc->surface.level[src_level].nblk_y) / (8*8);
@@ -2892,29 +2906,30 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx,
         */
        cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8;
        ncopy = (copy_height / cheight) + !!(copy_height % cheight);
-       r600_need_dma_space(&rctx->b, ncopy * 7);
+       r600_need_dma_space(&rctx->b, ncopy * 7, &rdst->resource, &rsrc->resource);
 
        for (i = 0; i < ncopy; i++) {
                cheight = cheight > copy_height ? copy_height : cheight;
                size = (cheight * pitch) / 4;
-               /* emit reloc before writting cs so that cs is always in consistent state */
-               r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ,
-                                     RADEON_PRIO_MIN);
-               r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE,
-                                     RADEON_PRIO_MIN);
-               cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size);
-               cs->buf[cs->cdw++] = base >> 8;
-               cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
-                                       (lbpp << 24) | ((height - 1) << 10) |
-                                       pitch_tile_max;
-               cs->buf[cs->cdw++] = (slice_tile_max << 12) | (z << 0);
-               cs->buf[cs->cdw++] = (x << 3) | (y << 17);
-               cs->buf[cs->cdw++] = addr & 0xfffffffc;
-               cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff;
+               /* emit reloc before writing cs so that cs is always in consistent state */
+               radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ,
+                                     RADEON_PRIO_SDMA_TEXTURE);
+               radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE,
+                                     RADEON_PRIO_SDMA_TEXTURE);
+               radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size));
+               radeon_emit(cs, base >> 8);
+               radeon_emit(cs, (detile << 31) | (array_mode << 27) |
+                               (lbpp << 24) | ((height - 1) << 10) |
+                               pitch_tile_max);
+               radeon_emit(cs, (slice_tile_max << 12) | (z << 0));
+               radeon_emit(cs, (x << 3) | (y << 17));
+               radeon_emit(cs, addr & 0xfffffffc);
+               radeon_emit(cs, (addr >> 32UL) & 0xff);
                copy_height -= cheight;
                addr += cheight * pitch;
                y += cheight;
        }
+       r600_dma_emit_wait_idle(&rctx->b);
        return TRUE;
 }
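
The rewritten loop above splits the tiled<->linear blit into chunks so that each DMA_PACKET_COPY stays within R600_DMA_COPY_MAX_SIZE_DW dwords; cheight is the number of rows per chunk, kept a multiple of 8. A sketch of just that chunking arithmetic, with an illustrative packet limit:

#include <stdio.h>

/* stand-in for the driver's per-packet dword limit; illustrative value only */
#define SKETCH_DMA_COPY_MAX_SIZE_DW 0xffff

/* rows that fit in one packet for a given pitch in bytes, rounded down to 8 */
static unsigned rows_per_packet(unsigned pitch)
{
        return ((SKETCH_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8u;
}

int main(void)
{
        unsigned pitch = 4096;                      /* bytes per row */
        unsigned copy_height = 300;                 /* rows to copy */
        unsigned cheight = rows_per_packet(pitch);  /* rows per packet for this pitch */
        unsigned ncopy = copy_height / cheight + !!(copy_height % cheight);

        printf("%u rows per packet -> %u packets\n", cheight, ncopy);
        return 0;
}
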
 
@@ -2934,7 +2949,7 @@ static void r600_dma_copy(struct pipe_context *ctx,
        unsigned src_x, src_y;
        unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
 
-       if (rctx->b.rings.dma.cs == NULL) {
+       if (rctx->b.dma.cs == NULL) {
                goto fallback;
        }
 
@@ -2946,9 +2961,10 @@ static void r600_dma_copy(struct pipe_context *ctx,
                return;
        }
 
-       if (src->format != dst->format || src_box->depth > 1) {
+       if (src_box->depth > 1 ||
+           !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
+                                       dstz, rsrc, src_level, src_box))
                goto fallback;
-       }
 
        src_x = util_format_get_nblocksx(src->format, src_box->x);
        dst_x = util_format_get_nblocksx(src->format, dst_x);
@@ -2964,9 +2980,6 @@ static void r600_dma_copy(struct pipe_context *ctx,
 
        dst_mode = rdst->surface.level[dst_level].mode;
        src_mode = rsrc->surface.level[src_level].mode;
-       /* downcast linear aligned to linear to simplify test */
-       src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
-       dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;
 
        if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
                /* strict requirement on r6xx/r7xx */
@@ -3013,9 +3026,8 @@ fallback:
 
 void r600_init_state_functions(struct r600_context *rctx)
 {
-       unsigned id = 4;
-       int i;
-
+       unsigned id = 1;
+       unsigned i;
        /* !!!
         *  To avoid GPU lockups, registers must be emitted in a specific order
         * (no kidding ...). The order below is important and has been
@@ -3045,7 +3057,7 @@ void r600_init_state_functions(struct r600_context *rctx)
        r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
        r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);
 
-       r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 7);
+       r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);
 
        r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
        r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
@@ -3060,23 +3072,18 @@ void r600_init_state_functions(struct r600_context *rctx)
        r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
        r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
        r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
-       r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 6);
+       r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 9);
        r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
-       for (i = 0;i < 16; i++) {
-               r600_init_atom(rctx, &rctx->scissor[i].atom, id++, r600_emit_scissor_state, 4);
-               r600_init_atom(rctx, &rctx->viewport[i].atom, id++, r600_emit_viewport_state, 8);
-               rctx->scissor[i].idx = i;
-               rctx->viewport[i].idx = i;
-       }
+       r600_add_atom(rctx, &rctx->b.scissors.atom, id++);
+       r600_add_atom(rctx, &rctx->b.viewports.atom, id++);
        r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
        r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
        r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
-       rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
-       rctx->atoms[id++] = &rctx->b.streamout.enable_atom;
-       r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
-       r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
-       r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
-       r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
+       r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
+       r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
+       r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
+       for (i = 0; i < R600_NUM_HW_STAGES; i++)
+               r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
        r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
        r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);
 
@@ -3088,7 +3095,6 @@ void r600_init_state_functions(struct r600_context *rctx)
        rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state;
        rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple;
        rctx->b.b.set_min_samples = r600_set_min_samples;
-       rctx->b.b.set_scissor_states = r600_set_scissor_states;
        rctx->b.b.get_sample_position = r600_get_sample_position;
        rctx->b.dma_copy = r600_dma_copy;
 }