X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fr600%2Fr600_state_common.c;h=31d08a877e13b8d556f36dfe5b0a529d821f944b;hb=9bc47dbe5062fe1f462f62bf3a2dda7b2f3ddea3;hp=d8c5fb284ef4b89784986f0cad2c8d8ce47b19d1;hpb=d663a557fd27d7c238248e19f22f2e6b05f03030;p=mesa.git

diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index d8c5fb284ef..31d08a877e1 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -25,28 +25,21 @@
  * Jerome Glisse
  */
 #include "r600_formats.h"
+#include "r600_shader.h"
 #include "r600d.h"

-#include "util/u_blitter.h"
+#include "util/u_draw_quad.h"
+#include "util/u_index_modify.h"
+#include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
+#include "util/u_math.h"
 #include "tgsi/tgsi_parse.h"
-#include <byteswap.h>

-static void r600_emit_command_buffer(struct r600_context *rctx, struct r600_atom *atom)
-{
-	struct radeon_winsys_cs *cs = rctx->cs;
-	struct r600_command_buffer *cb = (struct r600_command_buffer*)atom;
-
-	assert(cs->cdw + cb->atom.num_dw <= RADEON_MAX_CMDBUF_DWORDS);
-	memcpy(cs->buf + cs->cdw, cb->buf, 4 * cb->atom.num_dw);
-	cs->cdw += cb->atom.num_dw;
-}
+#define R600_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX

-void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw, enum r600_atom_flags flags)
+void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
 {
-	cb->atom.emit = r600_emit_command_buffer;
-	cb->atom.num_dw = 0;
-	cb->atom.flags = flags;
+	assert(!cb->buf);
 	cb->buf = CALLOC(1, 4 * num_dw);
 	cb->max_num_dw = num_dw;
 }
@@ -56,43 +49,32 @@ void r600_release_command_buffer(struct r600_command_buffer *cb)
 	FREE(cb->buf);
 }

-static void r600_emit_surface_sync(struct r600_context *rctx, struct r600_atom *atom)
-{
-	struct radeon_winsys_cs *cs = rctx->cs;
-	struct r600_surface_sync_cmd *a = (struct r600_surface_sync_cmd*)atom;
-
-	cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
-	cs->buf[cs->cdw++] = a->flush_flags;	/* CP_COHER_CNTL */
-	cs->buf[cs->cdw++] = 0xffffffff;	/* CP_COHER_SIZE */
-	cs->buf[cs->cdw++] = 0;			/* CP_COHER_BASE */
-	cs->buf[cs->cdw++] = 0x0000000A;	/* POLL_INTERVAL */
-
-	a->flush_flags = 0;
-}
-
-static void r600_emit_r6xx_flush_and_inv(struct r600_context *rctx, struct r600_atom *atom)
+void r600_init_atom(struct r600_context *rctx,
+		    struct r600_atom *atom,
+		    unsigned id,
+		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
+		    unsigned num_dw)
 {
-	struct radeon_winsys_cs *cs = rctx->cs;
-	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
-	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
+	assert(id < R600_NUM_ATOMS);
+	assert(rctx->atoms[id] == NULL);
+	rctx->atoms[id] = atom;
+	atom->emit = (void*)emit;
+	atom->num_dw = num_dw;
+	atom->dirty = false;
 }

-void r600_init_atom(struct r600_atom *atom,
-		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
-		    unsigned num_dw, enum r600_atom_flags flags)
+void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-	atom->emit = emit;
-	atom->num_dw = num_dw;
-	atom->flags = flags;
+	r600_emit_command_buffer(rctx->b.rings.gfx.cs, ((struct r600_cso_state*)atom)->cb);
 }

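[The hunks above replace the old ad-hoc emit helpers with a fixed table of numbered state atoms: each atom registers an emit callback, a worst-case dword count, and a dirty flag, and the draw path later walks the table emitting only dirty atoms. A minimal standalone sketch of that pattern, with simplified stand-in types rather than the driver's real structs:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_ATOMS 4

struct atom {
	void (*emit)(struct atom *a);
	unsigned num_dw;   /* worst-case size, used to reserve command-stream space */
	bool dirty;
};

static struct atom *atoms[NUM_ATOMS];

/* Mirrors the shape of r600_init_atom(): register an atom under a fixed id, once. */
static void init_atom(struct atom *a, unsigned id,
		      void (*emit)(struct atom *a), unsigned num_dw)
{
	assert(id < NUM_ATOMS);
	assert(atoms[id] == NULL);
	atoms[id] = a;
	a->emit = emit;
	a->num_dw = num_dw;
	a->dirty = false;
}

static void emit_blend(struct atom *a) { printf("emit blend (%u dw)\n", a->num_dw); }

int main(void)
{
	struct atom blend;
	init_atom(&blend, 0, emit_blend, 3);

	blend.dirty = true;                         /* a state change just flips the flag */

	for (unsigned i = 0; i < NUM_ATOMS; i++) {  /* draw time: emit what is dirty */
		if (atoms[i] && atoms[i]->dirty) {
			atoms[i]->emit(atoms[i]);
			atoms[i]->dirty = false;
		}
	}
	return 0;
}
]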
-static void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
+void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
 {
-	struct radeon_winsys_cs *cs = rctx->cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
 	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
 	unsigned alpha_ref = a->sx_alpha_ref;

-	if (rctx->chip_class >= EVERGREEN && a->cb0_export_16bpc) {
+	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
 		alpha_ref &= ~0x1FFF;
 	}
@@ -102,44 +84,19 @@ static void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_ato
 	r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
 }

-void r600_init_common_atoms(struct r600_context *rctx)
-{
-	r600_init_atom(&rctx->surface_sync_cmd.atom, r600_emit_surface_sync, 5, EMIT_EARLY);
-	r600_init_atom(&rctx->r6xx_flush_and_inv_cmd, r600_emit_r6xx_flush_and_inv, 2, EMIT_EARLY);
-	r600_init_atom(&rctx->alphatest_state.atom, r600_emit_alphatest_state, 3, 0);
-	r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
-}
-
-unsigned r600_get_cb_flush_flags(struct r600_context *rctx)
-{
-	unsigned flags = 0;
-
-	if (rctx->framebuffer.nr_cbufs) {
-		flags |= S_0085F0_CB_ACTION_ENA(1) |
-			 (((1 << rctx->framebuffer.nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT);
-	}
-
-	/* Workaround for broken flushing on some R6xx chipsets. */
-	if (rctx->family == CHIP_RV670 ||
-	    rctx->family == CHIP_RS780 ||
-	    rctx->family == CHIP_RS880) {
-		flags |= S_0085F0_CB1_DEST_BASE_ENA(1) |
-			 S_0085F0_DEST_BASE_0_ENA(1);
-	}
-	return flags;
-}
-
-void r600_texture_barrier(struct pipe_context *ctx)
+static void r600_texture_barrier(struct pipe_context *ctx)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;

-	rctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1) | r600_get_cb_flush_flags(rctx);
-	r600_atom_dirty(rctx, &rctx->surface_sync_cmd.atom);
+	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+			 R600_CONTEXT_FLUSH_AND_INV_CB |
+			 R600_CONTEXT_FLUSH_AND_INV |
+			 R600_CONTEXT_WAIT_3D_IDLE;
 }

-static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
+static unsigned r600_conv_pipe_prim(unsigned prim)
 {
-	static const int prim_conv[] = {
+	static const unsigned prim_conv[] = {
 		V_008958_DI_PT_POINTLIST,
 		V_008958_DI_PT_LINELIST,
 		V_008958_DI_PT_LINELOOP,
@@ -150,43 +107,43 @@ static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
 		V_008958_DI_PT_QUADLIST,
 		V_008958_DI_PT_QUADSTRIP,
 		V_008958_DI_PT_POLYGON,
-		-1,
-		-1,
-		-1,
-		-1
+		V_008958_DI_PT_LINELIST_ADJ,
+		V_008958_DI_PT_LINESTRIP_ADJ,
+		V_008958_DI_PT_TRILIST_ADJ,
+		V_008958_DI_PT_TRISTRIP_ADJ,
+		V_008958_DI_PT_RECTLIST
 	};
-
-	*prim = prim_conv[pprim];
-	if (*prim == -1) {
-		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
-		return false;
-	}
-	return true;
+	return prim_conv[prim];
 }

 /* common state between evergreen and r600 */
-void r600_bind_blend_state(struct pipe_context *ctx, void *state)
+
+static void r600_bind_blend_state_internal(struct r600_context *rctx,
+		struct r600_blend_state *blend, bool blend_disable)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
-	struct r600_pipe_state *rstate;
+	unsigned color_control;
 	bool update_cb = false;

-	if (state == NULL)
-		return;
-	rstate = &blend->rstate;
-	rctx->states[rstate->id] = rstate;
-	rctx->dual_src_blend = blend->dual_src_blend;
 	rctx->alpha_to_one = blend->alpha_to_one;
-	r600_context_pipe_state_set(rctx, rstate);
+	rctx->dual_src_blend = blend->dual_src_blend;
+
+	if (!blend_disable) {
+		r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer);
+		color_control = blend->cb_color_control;
+	} else {
+		/* Blending is disabled. */
+		r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer_no_blend);
+		color_control = blend->cb_color_control_no_blend;
+	}
+
+	/* Update derived states. */
 	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
 		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
 		update_cb = true;
 	}
-	if (rctx->chip_class <= R700 &&
-	    rctx->cb_misc_state.cb_color_control != blend->cb_color_control) {
-		rctx->cb_misc_state.cb_color_control = blend->cb_color_control;
+	if (rctx->b.chip_class <= R700 &&
+	    rctx->cb_misc_state.cb_color_control != color_control) {
+		rctx->cb_misc_state.cb_color_control = color_control;
 		update_cb = true;
 	}
 	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
@@ -194,64 +151,103 @@ void r600_bind_blend_state(struct pipe_context *ctx, void *state)
 		update_cb = true;
 	}
 	if (update_cb) {
-		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+		rctx->cb_misc_state.atom.dirty = true;
 	}
 }

-void r600_set_blend_color(struct pipe_context *ctx,
-			  const struct pipe_blend_color *state)
+static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
+	struct r600_blend_state *blend = (struct r600_blend_state *)state;

-	if (rstate == NULL)
+	if (blend == NULL)
 		return;

-	rstate->id = R600_PIPE_STATE_BLEND_COLOR;
-	r600_pipe_state_add_reg(rstate, R_028414_CB_BLEND_RED, fui(state->color[0]));
-	r600_pipe_state_add_reg(rstate, R_028418_CB_BLEND_GREEN, fui(state->color[1]));
-	r600_pipe_state_add_reg(rstate, R_02841C_CB_BLEND_BLUE, fui(state->color[2]));
-	r600_pipe_state_add_reg(rstate, R_028420_CB_BLEND_ALPHA, fui(state->color[3]));
+	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
+}
+
+static void r600_set_blend_color(struct pipe_context *ctx,
+				 const struct pipe_blend_color *state)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;

-	free(rctx->states[R600_PIPE_STATE_BLEND_COLOR]);
-	rctx->states[R600_PIPE_STATE_BLEND_COLOR] = rstate;
-	r600_context_pipe_state_set(rctx, rstate);
+	rctx->blend_color.state = *state;
+	rctx->blend_color.atom.dirty = true;
+}
+
+void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct pipe_blend_color *state = &rctx->blend_color.state;
+
+	r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
+	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
+	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
+	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
+	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
+}
+
+void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
+
+	r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
+	r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
+	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
+	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
+}
+
+static void r600_set_clip_state(struct pipe_context *ctx,
+				const struct pipe_clip_state *state)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct pipe_constant_buffer cb;
+
+	rctx->clip_state.state = *state;
+	rctx->clip_state.atom.dirty = true;
+
+	cb.buffer = NULL;
+	cb.user_buffer = state->ucp;
+	cb.buffer_offset = 0;
+	cb.buffer_size = 4*4*8;
+	ctx->set_constant_buffer(ctx, PIPE_SHADER_VERTEX, R600_UCP_CONST_BUFFER, &cb);
+	pipe_resource_reference(&cb.buffer, NULL);
 }
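[r600_emit_blend_color above shows the recurring emission idiom in this patch: one header declaring a run of consecutive context registers, followed by one value per register, with floats bit-cast to dwords via fui(). A self-contained sketch of that idiom, with a plain dword buffer standing in for the winsys command stream; the header encoding here is illustrative only, not the hardware's PM4 layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-cast a float to its IEEE-754 dword, like gallium's fui(). */
static uint32_t fui(float f)
{
	uint32_t u;
	memcpy(&u, &f, sizeof(u));
	return u;
}

struct cmd_stream { uint32_t buf[64]; unsigned cdw; };

static void emit(struct cmd_stream *cs, uint32_t dw) { cs->buf[cs->cdw++] = dw; }

/* Begin a run of `count` consecutive registers starting at `reg` (fake header). */
static void write_reg_seq(struct cmd_stream *cs, uint32_t reg, unsigned count)
{
	emit(cs, (count << 16) | reg);
}

int main(void)
{
	struct cmd_stream cs = {{0}, 0};
	const float blend_color[4] = {0.25f, 0.5f, 0.75f, 1.0f};

	/* Like CB_BLEND_RED..CB_BLEND_ALPHA: four consecutive registers, one seq. */
	write_reg_seq(&cs, 0x8414, 4);
	for (int i = 0; i < 4; i++)
		emit(&cs, fui(blend_color[i]));

	for (unsigned i = 0; i < cs.cdw; i++)
		printf("dw[%u] = 0x%08x\n", i, cs.buf[i]);
	return 0;
}
]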

 static void r600_set_stencil_ref(struct pipe_context *ctx,
 				 const struct r600_stencil_ref *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
-
-	if (rstate == NULL)
-		return;
-
-	rstate->id = R600_PIPE_STATE_STENCIL_REF;
-	r600_pipe_state_add_reg(rstate,
-				R_028430_DB_STENCILREFMASK,
-				S_028430_STENCILREF(state->ref_value[0]) |
-				S_028430_STENCILMASK(state->valuemask[0]) |
-				S_028430_STENCILWRITEMASK(state->writemask[0]));
-	r600_pipe_state_add_reg(rstate,
-				R_028434_DB_STENCILREFMASK_BF,
-				S_028434_STENCILREF_BF(state->ref_value[1]) |
-				S_028434_STENCILMASK_BF(state->valuemask[1]) |
-				S_028434_STENCILWRITEMASK_BF(state->writemask[1]));
+	rctx->stencil_ref.state = *state;
+	rctx->stencil_ref.atom.dirty = true;
+}

-	free(rctx->states[R600_PIPE_STATE_STENCIL_REF]);
-	rctx->states[R600_PIPE_STATE_STENCIL_REF] = rstate;
-	r600_context_pipe_state_set(rctx, rstate);
+void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
+
+	r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
+	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
+		    S_028430_STENCILREF(a->state.ref_value[0]) |
+		    S_028430_STENCILMASK(a->state.valuemask[0]) |
+		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
+	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
+		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
+		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
+		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
 }

-void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
-			       const struct pipe_stencil_ref *state)
+static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
+				      const struct pipe_stencil_ref *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa*)rctx->states[R600_PIPE_STATE_DSA];
+	struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
 	struct r600_stencil_ref ref;

-	rctx->stencil_ref = *state;
+	rctx->stencil_ref.pipe_state = *state;

 	if (!dsa)
 		return;
@@ -266,25 +262,33 @@ void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
 	r600_set_stencil_ref(ctx, &ref);
 }

-void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
+static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_dsa *dsa = state;
-	struct r600_pipe_state *rstate;
+	struct r600_dsa_state *dsa = state;
 	struct r600_stencil_ref ref;

 	if (state == NULL)
 		return;

-	rstate = &dsa->rstate;
-	rctx->states[rstate->id] = rstate;
-	r600_context_pipe_state_set(rctx, rstate);
-	ref.ref_value[0] = rctx->stencil_ref.ref_value[0];
-	ref.ref_value[1] = rctx->stencil_ref.ref_value[1];
+	r600_set_cso_state_with_cb(&rctx->dsa_state, dsa, &dsa->buffer);
+
+	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
+	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
 	ref.valuemask[0] = dsa->valuemask[0];
 	ref.valuemask[1] = dsa->valuemask[1];
 	ref.writemask[0] = dsa->writemask[0];
 	ref.writemask[1] = dsa->writemask[1];
+	if (rctx->zwritemask != dsa->zwritemask) {
+		rctx->zwritemask = dsa->zwritemask;
+		if (rctx->b.chip_class >= EVERGREEN) {
+			/* work around some issue when not writting to zbuffer
+			 * we are having lockup on evergreen so do not enable
+			 * hyperz when not writting zbuffer
+			 */
+			rctx->db_misc_state.atom.dirty = true;
+		}
+	}

 	r600_set_stencil_ref(ctx, &ref);

@@ -293,77 +297,64 @@
 	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
 		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
 		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
-		r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
+		rctx->alphatest_state.atom.dirty = true;
+		if (rctx->b.chip_class >= EVERGREEN) {
+			evergreen_update_db_shader_control(rctx);
+		} else {
+			r600_update_db_shader_control(rctx);
+		}
 	}
 }

-void r600_set_max_scissor(struct r600_context *rctx)
-{
-	/* Set a scissor state such that it doesn't do anything. */
-	struct pipe_scissor_state scissor;
-	scissor.minx = 0;
-	scissor.miny = 0;
-	scissor.maxx = 8192;
-	scissor.maxy = 8192;
-
-	r600_set_scissor_state(rctx, &scissor);
-}
-
-void r600_bind_rs_state(struct pipe_context *ctx, void *state)
+static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
+	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
 	struct r600_context *rctx = (struct r600_context *)ctx;

 	if (state == NULL)
 		return;

-	rctx->sprite_coord_enable = rs->sprite_coord_enable;
-	rctx->two_side = rs->two_side;
-	rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
-	rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
-	rctx->multisample_enable = rs->multisample_enable;
 	rctx->rasterizer = rs;
-	rctx->states[rs->rstate.id] = &rs->rstate;
-	r600_context_pipe_state_set(rctx, &rs->rstate);
+	r600_set_cso_state_with_cb(&rctx->rasterizer_state, rs, &rs->buffer);

-	if (rctx->chip_class >= EVERGREEN) {
-		evergreen_polygon_offset_update(rctx);
-	} else {
-		r600_polygon_offset_update(rctx);
+	if (rs->offset_enable &&
+	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
+	     rs->offset_scale != rctx->poly_offset_state.offset_scale)) {
+		rctx->poly_offset_state.offset_units = rs->offset_units;
+		rctx->poly_offset_state.offset_scale = rs->offset_scale;
+		rctx->poly_offset_state.atom.dirty = true;
 	}

-	/* Workaround for a missing scissor enable on r600. */
-	if (rctx->chip_class == R600) {
-		if (rs->scissor_enable != rctx->scissor_enable) {
-			rctx->scissor_enable = rs->scissor_enable;
+	/* Update clip_misc_state. */
+	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
+	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
+		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
+		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
+		rctx->clip_misc_state.atom.dirty = true;
+	}

-			if (rs->scissor_enable) {
-				r600_set_scissor_state(rctx, &rctx->scissor_state);
-			} else {
-				r600_set_max_scissor(rctx);
-			}
-		}
+	/* Workaround for a missing scissor enable on r600. */
+	if (rctx->b.chip_class == R600 &&
+	    rs->scissor_enable != rctx->scissor.enable) {
+		rctx->scissor.enable = rs->scissor_enable;
+		rctx->scissor.atom.dirty = true;
 	}
+
+	/* Re-emit PA_SC_LINE_STIPPLE. */
+	rctx->last_primitive_type = -1;
 }
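[Setting rctx->last_primitive_type = -1 at the end of r600_bind_rs_state forces the next draw to re-emit PA_SC_LINE_STIPPLE even if the primitive type is unchanged, because the stipple value now depends on the new rasterizer. The general pattern (cache the last value written, skip the write when it matches, use an impossible sentinel to invalidate) can be sketched as:

#include <stdio.h>

static int last_prim = -1;   /* -1: nothing cached, force the next write */

static void set_prim_type(int prim)
{
	if (prim == last_prim)
		return;          /* redundant, skip the register write */
	printf("write VGT_PRIMITIVE_TYPE = %d\n", prim);
	last_prim = prim;
}

int main(void)
{
	set_prim_type(4);   /* emitted */
	set_prim_type(4);   /* skipped */
	last_prim = -1;     /* e.g. rasterizer changed: invalidate the cache */
	set_prim_type(4);   /* emitted again */
	return 0;
}
]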

-void r600_delete_rs_state(struct pipe_context *ctx, void *state)
+static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
+	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

-	if (rctx->rasterizer == rs) {
-		rctx->rasterizer = NULL;
-	}
-	if (rctx->states[rs->rstate.id] == &rs->rstate) {
-		rctx->states[rs->rstate.id] = NULL;
-	}
-	free(rs);
+	r600_release_command_buffer(&rs->buffer);
+	FREE(rs);
 }

-void r600_sampler_view_destroy(struct pipe_context *ctx,
-			       struct pipe_sampler_view *state)
+static void r600_sampler_view_destroy(struct pipe_context *ctx,
+				      struct pipe_sampler_view *state)
 {
 	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;
@@ -371,125 +362,135 @@ void r600_sampler_view_destroy(struct pipe_context *ctx,
 	FREE(resource);
 }

-static void r600_bind_samplers(struct pipe_context *pipe,
+void r600_sampler_states_dirty(struct r600_context *rctx,
+			       struct r600_sampler_states *state)
+{
+	if (state->dirty_mask) {
+		if (state->dirty_mask & state->has_bordercolor_mask) {
+			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
+		}
+		state->atom.num_dw =
+			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
+			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
+		state->atom.dirty = true;
+	}
+}
+
+static void r600_bind_sampler_states(struct pipe_context *pipe,
                                unsigned shader,
 			       unsigned start,
 			       unsigned count, void **states)
 {
 	struct r600_context *rctx = (struct r600_context *)pipe;
-	struct r600_textures_info *dst;
+	struct r600_textures_info *dst = &rctx->samplers[shader];
+	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
 	int seamless_cube_map = -1;
 	unsigned i;
+	/* This sets 1-bit for states with index >= count. */
+	uint32_t disable_mask = ~((1ull << count) - 1);
+	/* These are the new states set by this function. */
+	uint32_t new_mask = 0;

 	assert(start == 0); /* XXX fix below */

-	switch (shader) {
-	case PIPE_SHADER_VERTEX:
-		dst = &rctx->vs_samplers;
-		break;
-	case PIPE_SHADER_FRAGMENT:
-		dst = &rctx->ps_samplers;
-		break;
-	default:
-		debug_error("bad shader in r600_bind_samplers()");
-		return;
-	}
-
-	memcpy(dst->samplers, states, sizeof(void*) * count);
-	dst->n_samplers = count;
-	dst->atom_sampler.num_dw = 0;
-
 	for (i = 0; i < count; i++) {
-		struct r600_pipe_sampler_state *sampler = states[i];
+		struct r600_pipe_sampler_state *rstate = rstates[i];

-		if (sampler == NULL) {
+		if (rstate == dst->states.states[i]) {
 			continue;
 		}
-		if (sampler->border_color_use) {
-			dst->atom_sampler.num_dw += 11;
-			rctx->flags |= R600_PARTIAL_FLUSH;
+
+		if (rstate) {
+			if (rstate->border_color_use) {
+				dst->states.has_bordercolor_mask |= 1 << i;
+			} else {
+				dst->states.has_bordercolor_mask &= ~(1 << i);
+			}
+			seamless_cube_map = rstate->seamless_cube_map;
+
+			new_mask |= 1 << i;
 		} else {
-			dst->atom_sampler.num_dw += 5;
+			disable_mask |= 1 << i;
 		}
-		seamless_cube_map = sampler->seamless_cube_map;
 	}
-	if (rctx->chip_class <= R700 && seamless_cube_map != -1 && seamless_cube_map != rctx->seamless_cube_map.enabled) {
+
+	memcpy(dst->states.states, rstates, sizeof(void*) * count);
+	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));
+
+	dst->states.enabled_mask &= ~disable_mask;
+	dst->states.dirty_mask &= dst->states.enabled_mask;
+	dst->states.enabled_mask |= new_mask;
+	dst->states.dirty_mask |= new_mask;
+	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;
+
+	r600_sampler_states_dirty(rctx, &dst->states);
+
+	/* Seamless cubemap state. */
+	if (rctx->b.chip_class <= R700 &&
+	    seamless_cube_map != -1 &&
+	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
 		/* change in TA_CNTL_AUX need a pipeline flush */
-		rctx->flags |= R600_PARTIAL_FLUSH;
+		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
 		rctx->seamless_cube_map.enabled = seamless_cube_map;
-		r600_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
-	}
-	if (dst->atom_sampler.num_dw) {
-		r600_atom_dirty(rctx, &dst->atom_sampler);
+		rctx->seamless_cube_map.atom.dirty = true;
 	}
 }

-void r600_bind_vs_samplers(struct pipe_context *ctx, unsigned count, void **states)
+static void r600_bind_vs_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
 {
-	r600_bind_samplers(ctx, PIPE_SHADER_VERTEX, 0, count, states);
+	r600_bind_sampler_states(ctx, PIPE_SHADER_VERTEX, 0, count, states);
 }

-void r600_bind_ps_samplers(struct pipe_context *ctx, unsigned count, void **states)
+static void r600_bind_ps_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
 {
-	r600_bind_samplers(ctx, PIPE_SHADER_FRAGMENT, 0, count, states);
+	r600_bind_sampler_states(ctx, PIPE_SHADER_FRAGMENT, 0, count, states);
 }
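[The mask arithmetic in r600_bind_sampler_states is easy to misread in diff form. Each slot i owns one bit in enabled_mask and dirty_mask; a bind computes new_mask (slots that now hold a state) and disable_mask (slots explicitly cleared, plus everything at index >= count), then folds both into the persistent masks. A compilable sketch of just that bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct slots { uint32_t enabled_mask, dirty_mask; };

/* `bound[i]` non-zero means slot i receives a state; `count` slots are written. */
static void bind(struct slots *s, const int *bound, unsigned count)
{
	uint32_t disable_mask = ~((1ull << count) - 1); /* 1-bits for i >= count */
	uint32_t new_mask = 0;

	for (unsigned i = 0; i < count; i++) {
		if (bound[i])
			new_mask |= 1u << i;
		else
			disable_mask |= 1u << i;
	}

	s->enabled_mask &= ~disable_mask;   /* drop cleared slots */
	s->dirty_mask &= s->enabled_mask;   /* never re-emit disabled slots */
	s->enabled_mask |= new_mask;        /* add the new ones */
	s->dirty_mask |= new_mask;          /* ...and schedule them for emission */
}

int main(void)
{
	struct slots s = { 0x7, 0 };        /* slots 0..2 enabled, nothing dirty */
	int bound[3] = {1, 0, 1};           /* rebind 0 and 2, clear 1 */
	bind(&s, bound, 3);
	printf("enabled=0x%x dirty=0x%x\n", s.enabled_mask, s.dirty_mask);
	return 0;
}
]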

-void r600_delete_sampler(struct pipe_context *ctx, void *state)
+static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
 {
 	free(state);
 }

-void r600_delete_state(struct pipe_context *ctx, void *state)
+static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
+	struct r600_blend_state *blend = (struct r600_blend_state*)state;

-	if (rctx->states[rstate->id] == rstate) {
-		rctx->states[rstate->id] = NULL;
-	}
-	for (int i = 0; i < rstate->nregs; i++) {
-		pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
-	}
-	free(rstate);
+	r600_release_command_buffer(&blend->buffer);
+	r600_release_command_buffer(&blend->buffer_no_blend);
+	FREE(blend);
 }

-void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
+static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_vertex_element *v = (struct r600_vertex_element*)state;
+	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

-	rctx->vertex_elements = v;
-	if (v) {
-		r600_inval_shader_cache(rctx);
-
-		rctx->states[v->rstate.id] = &v->rstate;
-		r600_context_pipe_state_set(rctx, &v->rstate);
-	}
+	r600_release_command_buffer(&dsa->buffer);
+	free(dsa);
 }

-void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
+static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

-	if (rctx->states[v->rstate.id] == &v->rstate) {
-		rctx->states[v->rstate.id] = NULL;
-	}
-	if (rctx->vertex_elements == state)
-		rctx->vertex_elements = NULL;
+	r600_set_cso_state(&rctx->vertex_fetch_shader, state);
+}

-	pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
-	FREE(state);
+static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
+{
+	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
+
+	pipe_resource_reference((struct pipe_resource**)&shader->buffer, NULL);
+	FREE(shader);
 }

-void r600_set_index_buffer(struct pipe_context *ctx,
-			   const struct pipe_index_buffer *ib)
+static void r600_set_index_buffer(struct pipe_context *ctx,
+				  const struct pipe_index_buffer *ib)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;

 	if (ib) {
 		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
-		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
+		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
+		r600_context_add_resource_size(ctx, ib->buffer);
 	} else {
 		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
 	}
@@ -498,48 +499,50 @@ void r600_set_index_buffer(struct pipe_context *ctx,

 void r600_vertex_buffers_dirty(struct r600_context *rctx)
 {
 	if (rctx->vertex_buffer_state.dirty_mask) {
-		r600_inval_vertex_cache(rctx);
-		rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) *
+		rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
+		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
 					       util_bitcount(rctx->vertex_buffer_state.dirty_mask);
-		r600_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
+		rctx->vertex_buffer_state.atom.dirty = true;
 	}
 }

-void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
-			     const struct pipe_vertex_buffer *input)
+static void r600_set_vertex_buffers(struct pipe_context *ctx,
+				    unsigned start_slot, unsigned count,
+				    const struct pipe_vertex_buffer *input)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
-	struct pipe_vertex_buffer *vb = state->vb;
+	struct pipe_vertex_buffer *vb = state->vb + start_slot;
 	unsigned i;
-	/* This sets 1-bit for buffers with index >= count. */
-	uint32_t disable_mask = ~((1ull << count) - 1);
+	uint32_t disable_mask = 0;
 	/* These are the new buffers set by this function. */
 	uint32_t new_buffer_mask = 0;

-	/* Set buffers with index >= count to NULL. */
-	uint32_t remaining_buffers_mask =
-		rctx->vertex_buffer_state.enabled_mask & disable_mask;
-
-	while (remaining_buffers_mask) {
-		i = u_bit_scan(&remaining_buffers_mask);
-		pipe_resource_reference(&vb[i].buffer, NULL);
-	}
-
 	/* Set vertex buffers. */
-	for (i = 0; i < count; i++) {
-		if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
-			if (input[i].buffer) {
-				vb[i].stride = input[i].stride;
-				vb[i].buffer_offset = input[i].buffer_offset;
-				pipe_resource_reference(&vb[i].buffer, input[i].buffer);
-				new_buffer_mask |= 1 << i;
-			} else {
-				pipe_resource_reference(&vb[i].buffer, NULL);
-				disable_mask |= 1 << i;
+	if (input) {
+		for (i = 0; i < count; i++) {
+			if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
+				if (input[i].buffer) {
+					vb[i].stride = input[i].stride;
+					vb[i].buffer_offset = input[i].buffer_offset;
+					pipe_resource_reference(&vb[i].buffer, input[i].buffer);
+					new_buffer_mask |= 1 << i;
+					r600_context_add_resource_size(ctx, input[i].buffer);
+				} else {
+					pipe_resource_reference(&vb[i].buffer, NULL);
+					disable_mask |= 1 << i;
+				}
 			}
 		}
-	}
+	} else {
+		for (i = 0; i < count; i++) {
+			pipe_resource_reference(&vb[i].buffer, NULL);
+		}
+		disable_mask = ((1ull << count) - 1);
+	}
+
+	disable_mask <<= start_slot;
+	new_buffer_mask <<= start_slot;

 	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
 	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
@@ -553,22 +556,21 @@
 void r600_sampler_views_dirty(struct r600_context *rctx,
 			      struct r600_samplerview_state *state)
 {
 	if (state->dirty_mask) {
-		r600_inval_texture_cache(rctx);
-		state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) *
+		rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
+		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
 				     util_bitcount(state->dirty_mask);
-		r600_atom_dirty(rctx, &state->atom);
+		state->atom.dirty = true;
 	}
 }

-void r600_set_sampler_views(struct pipe_context *pipe,
-			    unsigned shader,
-			    unsigned start,
-			    unsigned count,
-			    struct pipe_sampler_view **views)
+static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
+				   unsigned start, unsigned count,
+				   struct pipe_sampler_view **views)
 {
 	struct r600_context *rctx = (struct r600_context *) pipe;
-	struct r600_textures_info *dst;
+	struct r600_textures_info *dst = &rctx->samplers[shader];
 	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
+	uint32_t dirty_sampler_states_mask = 0;
 	unsigned i;
 	/* This sets 1-bit for textures with index >= count. */
 	uint32_t disable_mask = ~((1ull << count) - 1);
@@ -580,18 +582,6 @@ void r600_set_sampler_views(struct pipe_context *pipe,

 	assert(start == 0); /* XXX fix below */

-	switch (shader) {
-	case PIPE_SHADER_VERTEX:
-		dst = &rctx->vs_samplers;
-		break;
-	case PIPE_SHADER_FRAGMENT:
-		dst = &rctx->ps_samplers;
-		break;
-	default:
-		debug_error("bad shader in r600_set_sampler_views()");
-		return;
-	}
-
 	remaining_mask = dst->views.enabled_mask & disable_mask;

 	while (remaining_mask) {
@@ -607,25 +597,35 @@ void r600_set_sampler_views(struct pipe_context *pipe,
 		}

 		if (rviews[i]) {
-			struct r600_resource_texture *rtex =
-				(struct r600_resource_texture*)rviews[i]->base.texture;
-
-			if (rtex->is_depth && !rtex->is_flushing_texture) {
-				dst->views.depth_texture_mask |= 1 << i;
-			} else {
-				dst->views.depth_texture_mask &= ~(1 << i);
+			struct r600_texture *rtex =
+				(struct r600_texture*)rviews[i]->base.texture;
+
+			if (rviews[i]->base.texture->target != PIPE_BUFFER) {
+				if (rtex->is_depth && !rtex->is_flushing_texture) {
+					dst->views.compressed_depthtex_mask |= 1 << i;
+				} else {
+					dst->views.compressed_depthtex_mask &= ~(1 << i);
+				}
+
+				/* Track compressed colorbuffers. */
+				if (rtex->cmask_size && rtex->fmask_size) {
+					dst->views.compressed_colortex_mask |= 1 << i;
+				} else {
+					dst->views.compressed_colortex_mask &= ~(1 << i);
+				}
 			}
-
-			/* Changing from array to non-arrays textures and vice
-			 * versa requires updating TEX_ARRAY_OVERRIDE on R6xx-R7xx. */
-			if (rctx->chip_class <= R700 &&
+			/* Changing from array to non-arrays textures and vice versa requires
+			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
+			if (rctx->b.chip_class <= R700 &&
+			    (dst->states.enabled_mask & (1 << i)) &&
 			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
 			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
-				r600_atom_dirty(rctx, &dst->atom_sampler);
+				dirty_sampler_states_mask |= 1 << i;
 			}

 			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
 			new_mask |= 1 << i;
+			r600_context_add_resource_size(pipe, views[i]->texture);
 		} else {
 			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
 			disable_mask |= 1 << i;
@@ -636,47 +636,73 @@ void r600_set_sampler_views(struct pipe_context *pipe,
 	dst->views.dirty_mask &= dst->views.enabled_mask;
 	dst->views.enabled_mask |= new_mask;
 	dst->views.dirty_mask |= new_mask;
-	dst->views.depth_texture_mask &= dst->views.enabled_mask;
-
+	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
+	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
+	dst->views.dirty_txq_constants = TRUE;
+	dst->views.dirty_buffer_constants = TRUE;
 	r600_sampler_views_dirty(rctx, &dst->views);
+
+	if (dirty_sampler_states_mask) {
+		dst->states.dirty_mask |= dirty_sampler_states_mask;
+		r600_sampler_states_dirty(rctx, &dst->states);
+	}
 }

-void *r600_create_vertex_elements(struct pipe_context *ctx,
-				  unsigned count,
-				  const struct pipe_vertex_element *elements)
+static void r600_set_vs_sampler_views(struct pipe_context *ctx, unsigned count,
+				      struct pipe_sampler_view **views)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);
+	r600_set_sampler_views(ctx, PIPE_SHADER_VERTEX, 0, count, views);
+}

-	assert(count < 32);
-	if (!v)
-		return NULL;
+static void r600_set_ps_sampler_views(struct pipe_context *ctx, unsigned count,
+				      struct pipe_sampler_view **views)
+{
+	r600_set_sampler_views(ctx, PIPE_SHADER_FRAGMENT, 0, count, views);
+}

-	v->count = count;
-	memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);
+static void r600_set_viewport_states(struct pipe_context *ctx,
+				     unsigned start_slot,
+				     unsigned num_viewports,
+				     const struct pipe_viewport_state *state)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;

-	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
-		FREE(v);
-		return NULL;
-	}
+	rctx->viewport.state = *state;
+	rctx->viewport.atom.dirty = true;
+}

-	return v;
+void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct pipe_viewport_state *state = &rctx->viewport.state;
+
+	r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0, 6);
+	radeon_emit(cs, fui(state->scale[0]));     /* R_02843C_PA_CL_VPORT_XSCALE_0 */
+	radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */
+	radeon_emit(cs, fui(state->scale[1]));     /* R_028444_PA_CL_VPORT_YSCALE_0 */
+	radeon_emit(cs, fui(state->translate[1])); /* R_028448_PA_CL_VPORT_YOFFSET_0 */
+	radeon_emit(cs, fui(state->scale[2]));     /* R_02844C_PA_CL_VPORT_ZSCALE_0 */
+	radeon_emit(cs, fui(state->translate[2])); /* R_028450_PA_CL_VPORT_ZOFFSET_0 */
 }

 /* Compute the key for the hw shader variant */
-static INLINE unsigned r600_shader_selector_key(struct pipe_context * ctx,
+static INLINE struct r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
 		struct r600_pipe_shader_selector * sel)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	unsigned key;
+	struct r600_shader_key key;
+	memset(&key, 0, sizeof(key));

 	if (sel->type == PIPE_SHADER_FRAGMENT) {
-		key = rctx->two_side |
-		      ((rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer) << 1) |
-		      (MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 2);
-	} else
-		key = 0;
-
+		key.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
+		key.alpha_to_one = rctx->alpha_to_one &&
+				   rctx->rasterizer && rctx->rasterizer->multisample_enable &&
+				   !rctx->framebuffer.cb0_is_integer;
+		key.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
+		/* Dual-source blending only makes sense with nr_cbufs == 1. */
+		if (key.nr_cbufs == 1 && rctx->dual_src_blend)
+			key.nr_cbufs = 2;
+	}
 	return key;
 }
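[The shader key changes here from a hand-packed unsigned to a struct compared with memcmp, which is why both r600_shader_selector_key and r600_shader_select now memset the key before filling it: any padding bytes would otherwise hold garbage and defeat memcmp. A reduced, self-contained sketch of that variant-cache pattern (simplified list handling, not the driver's exact next_variant bookkeeping):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key { bool two_side; bool alpha_to_one; unsigned nr_cbufs; };

struct variant {
	struct key key;
	struct variant *next;
};

static struct variant *variants;   /* all compiled variants */
static struct variant *current;    /* most recently used one */

static struct variant *select_variant(const struct key *k)
{
	struct variant *v;

	/* Fast path: the bound variant already matches the key. */
	if (current && memcmp(&current->key, k, sizeof(*k)) == 0)
		return current;

	for (v = variants; v; v = v->next)
		if (memcmp(&v->key, k, sizeof(*k)) == 0)
			break;

	if (!v) {                       /* no match: "compile" a new variant */
		v = calloc(1, sizeof(*v));
		memcpy(&v->key, k, sizeof(*k));
		v->next = variants;
		variants = v;
	}
	current = v;
	return v;
}

int main(void)
{
	struct key k;
	memset(&k, 0, sizeof(k));       /* zero the padding so memcmp is reliable */
	k.nr_cbufs = 1;
	select_variant(&k);
	k.nr_cbufs = 2;
	printf("variant %p\n", (void *)select_variant(&k));
	return 0;
}
]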
@@ -684,20 +710,21 @@ static INLINE unsigned r600_shader_selector_key(struct pipe_context * ctx,
  * (*dirty) is set to 1 if current variant was changed */
 static int r600_shader_select(struct pipe_context *ctx,
         struct r600_pipe_shader_selector* sel,
-        unsigned *dirty)
+        bool *dirty)
 {
-	unsigned key;
+	struct r600_shader_key key;
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct r600_pipe_shader * shader = NULL;
 	int r;

+	memset(&key, 0, sizeof(key));
 	key = r600_shader_selector_key(ctx, sel);

 	/* Check if we don't need to change anything.
 	 * This path is also used for most shaders that don't need multiple
 	 * variants, it will cost just a computation of the key and this
 	 * test. */
-	if (likely(sel->current && sel->current->key == key)) {
+	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
 		return 0;
 	}
@@ -705,7 +732,7 @@ static int r600_shader_select(struct pipe_context *ctx,
 	if (sel->num_shaders > 1) {
 		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

-		while (c && c->key != key) {
+		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
 			p = c;
 			c = c->next_variant;
 		}
@@ -720,11 +747,12 @@ static int r600_shader_select(struct pipe_context *ctx,
 		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
 		shader->selector = sel;

-		r = r600_pipe_shader_create(ctx, shader);
+		r = r600_pipe_shader_create(ctx, shader, key);
 		if (unlikely(r)) {
-			R600_ERR("Failed to build shader variant (type=%u, key=%u) %d\n",
-				 sel->type, key, r);
+			R600_ERR("Failed to build shader variant (type=%u) %d\n",
+				 sel->type, r);
 			sel->current = NULL;
+			FREE(shader);
 			return r;
 		}

@@ -737,24 +765,20 @@ static int r600_shader_select(struct pipe_context *ctx,
 			key = r600_shader_selector_key(ctx, sel);
 		}

-		shader->key = key;
+		memcpy(&shader->key, &key, sizeof(key));
 		sel->num_shaders++;
 	}

 	if (dirty)
-		*dirty = 1;
+		*dirty = true;

 	shader->next_variant = sel->current;
 	sel->current = shader;

-	if (rctx->chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
-		r600_adjust_gprs(rctx);
-	}
-
 	if (rctx->ps_shader &&
 	    rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
 		rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
-		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+		rctx->cb_misc_state.atom.dirty = true;
 	}
 	return 0;
 }
@@ -777,56 +801,71 @@ static void *r600_create_shader_state(struct pipe_context *ctx,

 	return sel;
 }

-void *r600_create_shader_state_ps(struct pipe_context *ctx,
-		const struct pipe_shader_state *state)
+static void *r600_create_ps_state(struct pipe_context *ctx,
+				  const struct pipe_shader_state *state)
 {
 	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
 }

-void *r600_create_shader_state_vs(struct pipe_context *ctx,
-		const struct pipe_shader_state *state)
+static void *r600_create_vs_state(struct pipe_context *ctx,
+				  const struct pipe_shader_state *state)
 {
 	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
 }

-void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
+static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;

 	if (!state)
 		state = rctx->dummy_pixel_shader;

-	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
-	r600_context_pipe_state_set(rctx, &rctx->ps_shader->current->rstate);
+	rctx->pixel_shader.shader = rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
+	rctx->pixel_shader.atom.num_dw = rctx->ps_shader->current->command_buffer.num_dw;
+	rctx->pixel_shader.atom.dirty = true;
+
+	r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->ps_shader->current->bo);

-	if (rctx->chip_class <= R700) {
+	if (rctx->b.chip_class <= R700) {
 		bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

 		if (rctx->cb_misc_state.multiwrite != multiwrite) {
 			rctx->cb_misc_state.multiwrite = multiwrite;
-			r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+			rctx->cb_misc_state.atom.dirty = true;
 		}
-
-		if (rctx->vs_shader)
-			r600_adjust_gprs(rctx);
 	}

 	if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
 		rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
-		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+		rctx->cb_misc_state.atom.dirty = true;
+	}
+
+	if (rctx->b.chip_class >= EVERGREEN) {
+		evergreen_update_db_shader_control(rctx);
+	} else {
+		r600_update_db_shader_control(rctx);
+	}
 }

-void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
+static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;

-	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
-	if (state) {
-		r600_context_pipe_state_set(rctx, &rctx->vs_shader->current->rstate);
+	if (!state)
+		return;
+
+	rctx->vertex_shader.shader = rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
+	rctx->vertex_shader.atom.dirty = true;
+	rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;

-		if (rctx->chip_class < EVERGREEN && rctx->ps_shader)
-			r600_adjust_gprs(rctx);
+	r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->vs_shader->current->bo);
+
+	/* Update clip misc state. */
+	if (rctx->vs_shader->current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
+	    rctx->vs_shader->current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write) {
+		rctx->clip_misc_state.pa_cl_vs_out_cntl = rctx->vs_shader->current->pa_cl_vs_out_cntl;
+		rctx->clip_misc_state.clip_dist_write = rctx->vs_shader->current->shader.clip_dist_write;
+		rctx->clip_misc_state.atom.dirty = true;
 	}
 }
@@ -846,7 +885,7 @@ static void r600_delete_shader_selector(struct pipe_context *ctx,
 }


-void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
+static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;
@@ -858,7 +897,7 @@ void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
 	r600_delete_shader_selector(ctx, sel);
 }

-void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
+static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;
@@ -873,36 +912,25 @@ void r600_delete_vs_shader(struct pipe_context *ctx, void *state)

 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
 {
 	if (state->dirty_mask) {
-		r600_inval_shader_cache(rctx);
-		state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
+		rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
+		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
 								   : util_bitcount(state->dirty_mask)*19;
-		r600_atom_dirty(rctx, &state->atom);
+		state->atom.dirty = true;
 	}
 }

-void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
-			      struct pipe_constant_buffer *input)
+static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
+				     struct pipe_constant_buffer *input)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_constbuf_state *state;
+	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
 	struct pipe_constant_buffer *cb;
 	const uint8_t *ptr;

-	switch (shader) {
-	case PIPE_SHADER_VERTEX:
-		state = &rctx->vs_constbuf_state;
-		break;
-	case PIPE_SHADER_FRAGMENT:
-		state = &rctx->ps_constbuf_state;
-		break;
-	default:
-		return;
-	}
-
 	/* Note that the state tracker can unbind constant buffers by
 	 * passing NULL here. */
-	if (unlikely(!input)) {
+	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
 		state->enabled_mask &= ~(1 << index);
 		state->dirty_mask &= ~(1 << index);
 		pipe_resource_reference(&state->cb[index].buffer, NULL);
@@ -926,7 +954,7 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
 		}

 		for (i = 0; i < size / 4; ++i) {
-			tmpPtr[i] = bswap_32(((uint32_t *)ptr)[i]);
+			tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
 		}

 		u_upload_data(rctx->uploader, 0, size, tmpPtr, &cb->buffer_offset, &cb->buffer);
@@ -934,10 +962,13 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
 		} else {
 			u_upload_data(rctx->uploader, 0, input->buffer_size, ptr, &cb->buffer_offset, &cb->buffer);
 		}
+		/* account it in gtt */
+		rctx->b.gtt += input->buffer_size;
 	} else {
 		/* Setup the hw buffer. */
 		cb->buffer_offset = input->buffer_offset;
 		pipe_resource_reference(&cb->buffer, input->buffer);
+		r600_context_add_resource_size(ctx, input->buffer);
 	}

 	state->enabled_mask |= 1 << index;
@@ -945,119 +976,209 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
 	r600_constant_buffers_dirty(rctx, state);
 }
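[The bswap_32 to util_bswap32 change above swaps each 32-bit constant on big-endian hosts before upload, since the GPU consumes little-endian dwords; util_bswap32 comes from the newly included util/u_math.h. The effect of the swap, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Byte-swap one dword, equivalent in effect to util_bswap32(). */
static uint32_t bswap32(uint32_t n)
{
	return (n >> 24) |
	       ((n >> 8) & 0x0000ff00) |
	       ((n << 8) & 0x00ff0000) |
	       (n << 24);
}

int main(void)
{
	/* A float constant as stored by a big-endian CPU... */
	uint32_t host = 0x3f800000;   /* 1.0f */
	/* ...must reach the GPU with its bytes reversed. */
	printf("0x%08x -> 0x%08x\n", host, bswap32(host));
	return 0;
}
]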

-struct pipe_stream_output_target *
-r600_create_so_target(struct pipe_context *ctx,
-		      struct pipe_resource *buffer,
-		      unsigned buffer_offset,
-		      unsigned buffer_size)
+static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
 {
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	struct r600_so_target *t;
-	void *ptr;
-
-	t = CALLOC_STRUCT(r600_so_target);
-	if (!t) {
-		return NULL;
-	}
-
-	t->b.reference.count = 1;
-	t->b.context = ctx;
-	pipe_resource_reference(&t->b.buffer, buffer);
-	t->b.buffer_offset = buffer_offset;
-	t->b.buffer_size = buffer_size;
+	struct r600_context *rctx = (struct r600_context*)pipe;

-	t->filled_size = (struct r600_resource*)
-		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
-	ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
-	memset(ptr, 0, t->filled_size->buf->size);
-	rctx->ws->buffer_unmap(t->filled_size->cs_buf);
+	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
+		return;

-	return &t->b;
+	rctx->sample_mask.sample_mask = sample_mask;
+	rctx->sample_mask.atom.dirty = true;
 }

-void r600_so_target_destroy(struct pipe_context *ctx,
-			    struct pipe_stream_output_target *target)
+/*
+ * On r600/700 hw we don't have vertex fetch swizzle, though TBO
+ * doesn't require full swizzles it does need masking and setting alpha
+ * to one, so we setup a set of 5 constants with the masks + alpha value
+ * then in the shader, we AND the 4 components with 0xffffffff or 0,
+ * then OR the alpha with the value given here.
+ * We use a 6th constant to store the txq buffer size in
+ */
+static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
 {
-	struct r600_so_target *t = (struct r600_so_target*)target;
-	pipe_resource_reference(&t->b.buffer, NULL);
-	pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
-	FREE(t);
-}
+	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
+	int bits;
+	uint32_t array_size;
+	struct pipe_constant_buffer cb;
+	int i, j;

-void r600_set_so_targets(struct pipe_context *ctx,
-			 unsigned num_targets,
-			 struct pipe_stream_output_target **targets,
-			 unsigned append_bitmask)
-{
-	struct r600_context *rctx = (struct r600_context *)ctx;
-	unsigned i;
+	if (!samplers->views.dirty_buffer_constants)
+		return;

-	/* Stop streamout. */
-	if (rctx->num_so_targets && !rctx->streamout_start) {
-		r600_context_streamout_end(rctx);
+	samplers->views.dirty_buffer_constants = FALSE;
+
+	bits = util_last_bit(samplers->views.enabled_mask);
+	array_size = bits * 8 * sizeof(uint32_t) * 4;
+	samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
+	memset(samplers->buffer_constants, 0, array_size);
+	for (i = 0; i < bits; i++) {
+		if (samplers->views.enabled_mask & (1 << i)) {
+			int offset = i * 8;
+			const struct util_format_description *desc;
+			desc = util_format_description(samplers->views.views[i]->base.format);
+
+			for (j = 0; j < 4; j++)
+				if (j < desc->nr_channels)
+					samplers->buffer_constants[offset+j] = 0xffffffff;
+				else
+					samplers->buffer_constants[offset+j] = 0x0;
+			if (desc->nr_channels < 4) {
+				if (desc->channel[0].pure_integer)
+					samplers->buffer_constants[offset+4] = 1;
+				else
+					samplers->buffer_constants[offset+4] = 0x3f800000;
+			} else
+				samplers->buffer_constants[offset + 4] = 0;
+
+			samplers->buffer_constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
+		}
 	}

-	/* Set the new targets. */
-	for (i = 0; i < num_targets; i++) {
-		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], targets[i]);
-	}
-	for (; i < rctx->num_so_targets; i++) {
-		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], NULL);
-	}
+	cb.buffer = NULL;
+	cb.user_buffer = samplers->buffer_constants;
+	cb.buffer_offset = 0;
+	cb.buffer_size = array_size;
+	rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
+	pipe_resource_reference(&cb.buffer, NULL);
+}
+
+/* On evergreen we only need to store the buffer size for TXQ */
+static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
+{
+	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
+	int bits;
+	uint32_t array_size;
+	struct pipe_constant_buffer cb;
+	int i;

-	rctx->num_so_targets = num_targets;
-	rctx->streamout_start = num_targets != 0;
-	rctx->streamout_append_bitmask = append_bitmask;
+	if (!samplers->views.dirty_buffer_constants)
+		return;
+
+	samplers->views.dirty_buffer_constants = FALSE;
+
+	bits = util_last_bit(samplers->views.enabled_mask);
+	array_size = bits * sizeof(uint32_t) * 4;
+	samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
+	memset(samplers->buffer_constants, 0, array_size);
+	for (i = 0; i < bits; i++)
+		if (samplers->views.enabled_mask & (1 << i))
+			samplers->buffer_constants[i] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
+
+	cb.buffer = NULL;
+	cb.user_buffer = samplers->buffer_constants;
+	cb.buffer_offset = 0;
+	cb.buffer_size = array_size;
+	rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
+	pipe_resource_reference(&cb.buffer, NULL);
 }
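[The comment block above describes the r600/700 TBO workaround; a worked example makes the constant layout concrete. Take a two-channel RG32F buffer texture: the shader ANDs each fetched component with constants 0..3 and ORs constant 4 into alpha, and constant 5 feeds TXQ. The sketch below fills one 8-dword record the same way r600_setup_buffer_constants does, simplified so that the channel count and block size are passed in directly instead of being read from a util_format_description:

#include <stdint.h>
#include <stdio.h>

/* One record per sampler: [0..3] channel masks, [4] alpha fill, [5] texel count. */
static void fill_record(uint32_t rec[8], unsigned nr_channels,
			int pure_integer, unsigned width0, unsigned blocksize)
{
	for (unsigned j = 0; j < 4; j++)
		rec[j] = j < nr_channels ? 0xffffffff : 0x0;

	if (nr_channels < 4)
		/* OR 1.0 into alpha: integer 1, or float 1.0 as 0x3f800000. */
		rec[4] = pure_integer ? 1 : 0x3f800000;
	else
		rec[4] = 0;

	rec[5] = width0 / blocksize;   /* buffer size in texels, for TXQ */
	rec[6] = rec[7] = 0;
}

int main(void)
{
	uint32_t rec[8];
	/* RG32F buffer of 4096 bytes: 2 channels, 8-byte texels. */
	fill_record(rec, 2, 0, 4096, 8);
	for (int i = 0; i < 8; i++)
		printf("const[%d] = 0x%08x\n", i, rec[i]);
	return 0;
}
]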

-void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
+static void r600_setup_txq_cube_array_constants(struct r600_context *rctx, int shader_type)
 {
-	struct r600_context *rctx = (struct r600_context*)pipe;
+	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
+	int bits;
+	uint32_t array_size;
+	struct pipe_constant_buffer cb;
+	int i;

-	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
+	if (!samplers->views.dirty_txq_constants)
 		return;

-	rctx->sample_mask.sample_mask = sample_mask;
-	r600_atom_dirty(rctx, &rctx->sample_mask.atom);
+	samplers->views.dirty_txq_constants = FALSE;
+
+	bits = util_last_bit(samplers->views.enabled_mask);
+	array_size = bits * sizeof(uint32_t) * 4;
+	samplers->txq_constants = realloc(samplers->txq_constants, array_size);
+	memset(samplers->txq_constants, 0, array_size);
+	for (i = 0; i < bits; i++)
+		if (samplers->views.enabled_mask & (1 << i))
+			samplers->txq_constants[i] = samplers->views.views[i]->base.texture->array_size / 6;
+
+	cb.buffer = NULL;
+	cb.user_buffer = samplers->txq_constants;
+	cb.buffer_offset = 0;
+	cb.buffer_size = array_size;
+	rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_TXQ_CONST_BUFFER, &cb);
+	pipe_resource_reference(&cb.buffer, NULL);
 }

-static void r600_update_derived_state(struct r600_context *rctx)
+static bool r600_update_derived_state(struct r600_context *rctx)
 {
 	struct pipe_context * ctx = (struct pipe_context*)rctx;
-	unsigned ps_dirty = 0;
+	bool ps_dirty = false;
+	bool blend_disable;

 	if (!rctx->blitter->running) {
-		/* Flush depth textures which need to be flushed. */
-		if (rctx->vs_samplers.views.depth_texture_mask) {
-			r600_flush_depth_textures(rctx, &rctx->vs_samplers.views);
-		}
-		if (rctx->ps_samplers.views.depth_texture_mask) {
-			r600_flush_depth_textures(rctx, &rctx->ps_samplers.views);
+		unsigned i;
+
+		/* Decompress textures if needed. */
+		for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+			struct r600_samplerview_state *views = &rctx->samplers[i].views;
+			if (views->compressed_depthtex_mask) {
+				r600_decompress_depth_textures(rctx, views);
+			}
+			if (views->compressed_colortex_mask) {
+				r600_decompress_color_textures(rctx, views);
+			}
 		}
 	}

 	r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);

-	if (rctx->ps_shader && ((rctx->sprite_coord_enable &&
-		(rctx->ps_shader->current->sprite_coord_enable != rctx->sprite_coord_enable)) ||
-		(rctx->rasterizer && rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) {
+	if (rctx->ps_shader && rctx->rasterizer &&
+	    ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
+	     (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) {

-		if (rctx->chip_class >= EVERGREEN)
-			evergreen_pipe_shader_ps(ctx, rctx->ps_shader->current);
+		if (rctx->b.chip_class >= EVERGREEN)
+			evergreen_update_ps_state(ctx, rctx->ps_shader->current);
 		else
-			r600_pipe_shader_ps(ctx, rctx->ps_shader->current);
+			r600_update_ps_state(ctx, rctx->ps_shader->current);

-		ps_dirty = 1;
+		ps_dirty = true;
 	}

-	if (ps_dirty)
-		r600_context_pipe_state_set(rctx, &rctx->ps_shader->current->rstate);
-
-	if (rctx->chip_class >= EVERGREEN) {
-		evergreen_update_dual_export_state(rctx);
+	if (ps_dirty) {
+		rctx->pixel_shader.atom.num_dw = rctx->ps_shader->current->command_buffer.num_dw;
+		rctx->pixel_shader.atom.dirty = true;
+	}
+
+	/* on R600 we stuff masks + txq info into one constant buffer */
+	/* on evergreen we only need a txq info one */
+	if (rctx->b.chip_class < EVERGREEN) {
+		if (rctx->ps_shader && rctx->ps_shader->current->shader.uses_tex_buffers)
+			r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
+		if (rctx->vs_shader && rctx->vs_shader->current->shader.uses_tex_buffers)
+			r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
 	} else {
-		r600_update_dual_export_state(rctx);
+		if (rctx->ps_shader && rctx->ps_shader->current->shader.uses_tex_buffers)
+			eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
+		if (rctx->vs_shader && rctx->vs_shader->current->shader.uses_tex_buffers)
+			eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
 	}
+
+
+	if (rctx->ps_shader && rctx->ps_shader->current->shader.has_txq_cube_array_z_comp)
+		r600_setup_txq_cube_array_constants(rctx, PIPE_SHADER_FRAGMENT);
+	if (rctx->vs_shader && rctx->vs_shader->current->shader.has_txq_cube_array_z_comp)
+		r600_setup_txq_cube_array_constants(rctx, PIPE_SHADER_VERTEX);
+
+	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
+		if (!r600_adjust_gprs(rctx)) {
+			/* discard rendering */
+			return false;
+		}
+	}
+
+	blend_disable = (rctx->dual_src_blend &&
+			 rctx->ps_shader->current->nr_ps_color_outputs < 2);
+
+	if (blend_disable != rctx->force_blend_disable) {
+		rctx->force_blend_disable = blend_disable;
+		r600_bind_blend_state_internal(rctx,
+					       rctx->blend_state.cso,
+					       blend_disable);
+	}
+	return true;
 }

 static unsigned r600_conv_prim_to_gs_out(unsigned mode)
@@ -1076,6 +1197,7 @@ static unsigned r600_conv_prim_to_gs_out(unsigned mode)
 		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
 		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
 		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
 		V_028A6C_OUTPRIM_TYPE_TRISTRIP
 	};
 	assert(mode < Elements(prim_conv));
@@ -1083,20 +1205,28 @@ static unsigned r600_conv_prim_to_gs_out(unsigned mode)
 	return prim_conv[mode];
 }

-void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
+void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct r600_clip_misc_state *state = &rctx->clip_misc_state;
+
+	r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
+			       state->pa_cl_clip_cntl |
+			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F));
+	r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
+			       state->pa_cl_vs_out_cntl |
+			       (state->clip_plane_enable & state->clip_dist_write));
+}
+
+static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct pipe_draw_info info = *dinfo;
 	struct pipe_index_buffer ib = {};
-	unsigned prim, ls_mask = 0;
-	struct r600_block *dirty_block = NULL, *next_block = NULL;
-	struct r600_atom *state = NULL, *next_state = NULL;
-	struct radeon_winsys_cs *cs = rctx->cs;
-	uint64_t va;
-	uint8_t *ptr;
+	unsigned i;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

-	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
-	    !r600_conv_pipe_prim(info.mode, &prim)) {
+	if (!info.count && (info.indexed || !info.count_from_stream_output)) {
 		assert(0);
 		return;
 	}
@@ -1106,13 +1236,16 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 		return;
 	}

-	r600_update_derived_state(rctx);
+	/* make sure that the gfx ring is only one active */
+	if (rctx->b.rings.dma.cs) {
+		rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC);
+	}

-	/* partial flush triggered by border color change */
-	if (rctx->flags & R600_PARTIAL_FLUSH) {
-		rctx->flags &= ~R600_PARTIAL_FLUSH;
-		r600_write_value(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
-		r600_write_value(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+	if (!r600_update_derived_state(rctx)) {
+		/* useless to render because current rendering command
+		 * can't be achieved
+		 */
+		return;
 	}

 	if (info.indexed) {
@@ -1122,75 +1255,94 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 		ib.index_size = rctx->index_buffer.index_size;
 		ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;

-		/* Translate or upload, if needed. */
-		r600_translate_index_buffer(rctx, &ib, info.count);
+		/* Translate 8-bit indices to 16-bit. */
+		if (ib.index_size == 1) {
+			struct pipe_resource *out_buffer = NULL;
+			unsigned out_offset;
+			void *ptr;
+
+			u_upload_alloc(rctx->uploader, 0, info.count * 2,
+				       &out_offset, &out_buffer, &ptr);
+
+			util_shorten_ubyte_elts_to_userptr(
+					&rctx->b.b, &ib, 0, ib.offset, info.count, ptr);

-		ptr = (uint8_t*)ib.user_buffer;
-		if (!ib.buffer && ptr) {
+			pipe_resource_reference(&ib.buffer, NULL);
+			ib.user_buffer = NULL;
+			ib.buffer = out_buffer;
+			ib.offset = out_offset;
+			ib.index_size = 2;
+		}
+
+		/* Upload the index buffer.
+		 * The upload is skipped for small index counts on little-endian machines
+		 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
+		 * Note: Instanced rendering in combination with immediate indices hangs. */
+		if (ib.user_buffer && (R600_BIG_ENDIAN || info.instance_count > 1 ||
+				       info.count*ib.index_size > 20)) {
 			u_upload_data(rctx->uploader, 0, info.count * ib.index_size,
-				      ptr, &ib.offset, &ib.buffer);
+				      ib.user_buffer, &ib.offset, &ib.buffer);
+			ib.user_buffer = NULL;
+		}
 	} else {
 		info.index_bias = info.start;
 	}
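[util_shorten_ubyte_elts_to_userptr (from the newly included util/u_index_modify.h) widens the unsupported 8-bit indices into the freshly allocated upload buffer; the core of the operation is just this copy, ignoring the bias and offset parameters the real helper takes:

#include <stdint.h>
#include <stdio.h>

/* Widen 8-bit indices to the 16-bit format the hardware can fetch. */
static void widen_ubyte_indices(const uint8_t *in, uint16_t *out, unsigned count)
{
	for (unsigned i = 0; i < count; i++)
		out[i] = in[i];
}

int main(void)
{
	const uint8_t ib8[6] = {0, 1, 2, 2, 1, 3};
	uint16_t ib16[6];

	widen_ubyte_indices(ib8, ib16, 6);
	for (int i = 0; i < 6; i++)
		printf("%u ", ib16[i]);
	printf("\n");
	return 0;
}
]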
-	r600_need_cs_space(rctx, 0, TRUE);
-
-	LIST_FOR_EACH_ENTRY_SAFE(state, next_state, &rctx->dirty_states, head) {
-		r600_emit_atom(rctx, state);
-	}
-	LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty,list) {
-		r600_context_block_emit_dirty(rctx, dirty_block, 0 /* pkt_flags */);
-	}
-	rctx->pm4_dirty_cdwords = 0;
-
-	/* draw packet */
+	/* Set the index offset and primitive restart. */
+	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
+	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
+	    rctx->vgt_state.vgt_indx_offset != info.index_bias) {
+		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
+		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
+		rctx->vgt_state.vgt_indx_offset = info.index_bias;
+		rctx->vgt_state.atom.dirty = true;
+	}
+
+	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
+	if (rctx->b.chip_class == R600) {
+		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
+		rctx->cb_misc_state.atom.dirty = true;
+	}
+
+	/* Emit states. */
+	r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+	r600_flush_emit(rctx);
+
+	for (i = 0; i < R600_NUM_ATOMS; i++) {
+		if (rctx->atoms[i] == NULL || !rctx->atoms[i]->dirty) {
+			continue;
+		}
+		r600_emit_atom(rctx, rctx->atoms[i]);
+	}
+
+	/* Update start instance. */
+	if (rctx->last_start_instance != info.start_instance) {
+		r600_write_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
+		rctx->last_start_instance = info.start_instance;
+	}
+
+	/* Update the primitive type. */
+	if (rctx->last_primitive_type != info.mode) {
+		unsigned ls_mask = 0;
+
+		if (info.mode == PIPE_PRIM_LINES)
+			ls_mask = 1;
+		else if (info.mode == PIPE_PRIM_LINE_STRIP ||
+			 info.mode == PIPE_PRIM_LINE_LOOP)
+			ls_mask = 2;
+
+		r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+				       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
+				       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
+		r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
+				       r600_conv_prim_to_gs_out(info.mode));
+		r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
+				      r600_conv_pipe_prim(info.mode));
+
+		rctx->last_primitive_type = info.mode;
+	}
+
+	/* Draw packets. */
 	cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
 	cs->buf[cs->cdw++] = info.instance_count;
 	if (info.indexed) {
@@ -1199,19 +1351,28 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 			(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
 			(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
-		va = r600_resource_va(ctx->screen, ib.buffer);
-		va += ib.offset;
-		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
-		cs->buf[cs->cdw++] = va;
-		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
-		cs->buf[cs->cdw++] = info.count;
-		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
-		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
-		cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
+		if (ib.user_buffer) {
+			unsigned size_bytes = info.count*ib.index_size;
+			unsigned size_dw = align(size_bytes, 4) / 4;
+			cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, rctx->predicate_drawing);
+			cs->buf[cs->cdw++] = info.count;
+			cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_IMMEDIATE;
+			memcpy(cs->buf+cs->cdw, ib.user_buffer, size_bytes);
+			cs->cdw += size_dw;
+		} else {
+			uint64_t va = r600_resource_va(ctx->screen, ib.buffer) + ib.offset;
+			cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
+			cs->buf[cs->cdw++] = va;
+			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
+			cs->buf[cs->cdw++] = info.count;
+			cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
+			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
+			cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
+		}
 	} else {
 		if (info.count_from_stream_output) {
 			struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
-			uint64_t va = r600_resource_va(&rctx->screen->screen, (void*)t->filled_size);
+			uint64_t va = r600_resource_va(&rctx->screen->b.b, (void*)t->buf_filled_size) + t->buf_filled_size_offset;
 
 			r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
@@ -1223,7 +1384,7 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 			cs->buf[cs->cdw++] = 0; /* unused */
 
 			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
-			cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, t->filled_size, RADEON_USAGE_READ);
+			cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, t->buf_filled_size, RADEON_USAGE_READ);
 		}
 
 		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing);
@@ -1232,69 +1393,95 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 			(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
 	}
 
-	rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;
+	if (rctx->screen->trace_bo) {
+		r600_trace_emit(rctx);
+	}
 
 	/* Set the depth buffer as dirty. */
-	if (rctx->framebuffer.zsbuf) {
-		struct pipe_surface *surf = rctx->framebuffer.zsbuf;
-		struct r600_resource_texture *rtex = (struct r600_resource_texture *)surf->texture;
+	if (rctx->framebuffer.state.zsbuf) {
+		struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
+		struct r600_texture *rtex = (struct r600_texture *)surf->texture;
 
-		rtex->dirty_db_mask |= 1 << surf->u.tex.level;
+		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
 	}
+	if (rctx->framebuffer.compressed_cb_mask) {
+		struct pipe_surface *surf;
+		struct r600_texture *rtex;
+		unsigned mask = rctx->framebuffer.compressed_cb_mask;
 
-	pipe_resource_reference(&ib.buffer, NULL);
-}
+		do {
+			unsigned i = u_bit_scan(&mask);
+			surf = rctx->framebuffer.state.cbufs[i];
+			rtex = (struct r600_texture*)surf->texture;
 
-void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
-				 struct r600_pipe_state *state,
-				 uint32_t offset, uint32_t value,
-				 uint32_t range_id, uint32_t block_id,
-				 struct r600_resource *bo,
-				 enum radeon_bo_usage usage)
-
-{
-	struct r600_range *range;
-	struct r600_block *block;
+			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
 
-	if (bo) assert(usage);
-
-	range = &ctx->range[range_id];
-	block = range->blocks[block_id];
-	state->regs[state->nregs].block = block;
-	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;
-
-	state->regs[state->nregs].value = value;
-	state->regs[state->nregs].bo = bo;
-	state->regs[state->nregs].bo_usage = usage;
-
-	state->nregs++;
-	assert(state->nregs < R600_BLOCK_MAX_REG);
-}
+		} while (mask);
+	}
 
-void _r600_pipe_state_add_reg(struct r600_context *ctx,
-			      struct r600_pipe_state *state,
-			      uint32_t offset, uint32_t value,
-			      uint32_t range_id, uint32_t block_id)
-{
-	_r600_pipe_state_add_reg_bo(ctx, state, offset, value,
-				    range_id, block_id, NULL, 0);
+	pipe_resource_reference(&ib.buffer, NULL);
+	rctx->num_draw_calls++;
 }
 
-void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
-				     uint32_t offset, uint32_t value,
-				     struct r600_resource *bo,
-				     enum radeon_bo_usage usage)
+void r600_draw_rectangle(struct blitter_context *blitter,
+			 int x1, int y1, int x2, int y2, float depth,
+			 enum blitter_attrib_type type, const union pipe_color_union *attrib)
 {
-	if (bo) assert(usage);
-
-	state->regs[state->nregs].id = offset;
-	state->regs[state->nregs].block = NULL;
-	state->regs[state->nregs].value = value;
-	state->regs[state->nregs].bo = bo;
-	state->regs[state->nregs].bo_usage = usage;
+	struct r600_context *rctx = (struct r600_context*)util_blitter_get_pipe(blitter);
+	struct pipe_viewport_state viewport;
+	struct pipe_resource *buf = NULL;
+	unsigned offset = 0;
+	float *vb;
+
+	if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
+		util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
+		return;
+	}
 
-	state->nregs++;
-	assert(state->nregs < R600_BLOCK_MAX_REG);
+	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
+	 * One that works is PT_RECTLIST, which we use here. */
+
+	/* setup viewport */
+	viewport.scale[0] = 1.0f;
+	viewport.scale[1] = 1.0f;
+	viewport.scale[2] = 1.0f;
+	viewport.scale[3] = 1.0f;
+	viewport.translate[0] = 0.0f;
+	viewport.translate[1] = 0.0f;
+	viewport.translate[2] = 0.0f;
+	viewport.translate[3] = 0.0f;
+	rctx->b.b.set_viewport_states(&rctx->b.b, 0, 1, &viewport);
+
+	/* Upload vertices. The hw rectangle has only 3 vertices;
	 * the 4th one is presumably derived from the first 3.
+	 * The vertex specification should match u_blitter's vertex element state. */
+	u_upload_alloc(rctx->uploader, 0, sizeof(float) * 24, &offset, &buf, (void**)&vb);
+	vb[0] = x1;
+	vb[1] = y1;
+	vb[2] = depth;
+	vb[3] = 1;
+
+	vb[8] = x1;
+	vb[9] = y2;
+	vb[10] = depth;
+	vb[11] = 1;
+
+	vb[16] = x2;
+	vb[17] = y1;
+	vb[18] = depth;
+	vb[19] = 1;
+
+	if (attrib) {
+		memcpy(vb+4, attrib->f, sizeof(float)*4);
+		memcpy(vb+12, attrib->f, sizeof(float)*4);
+		memcpy(vb+20, attrib->f, sizeof(float)*4);
+	}
+
+	/* draw */
+	util_draw_vertex_buffer(&rctx->b.b, NULL, buf, rctx->blitter->vb_slot, offset,
+				R600_PRIM_RECTANGLE_LIST, 3, 2);
+	pipe_resource_reference(&buf, NULL);
 }
 
 uint32_t r600_translate_stencil_op(int s_op)
@@ -1408,3 +1595,90 @@ unsigned r600_tex_compare(unsigned compare)
 		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
 	}
 }
+
+static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
+{
+	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
+	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
+	       (linear_filter &&
+		(wrap == PIPE_TEX_WRAP_CLAMP ||
+		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
+}
+
+bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
+{
+	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
+			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;
+
+	return (state->border_color.ui[0] || state->border_color.ui[1] ||
+		state->border_color.ui[2] || state->border_color.ui[3]) &&
+	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
+		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
+		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
+}
+
+void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
+{
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader->current;
+
+	r600_emit_command_buffer(cs, &shader->command_buffer);
+
+	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->bo, RADEON_USAGE_READ));
+}
+
+/* keep this at the end of this file, please */
+void r600_init_common_state_functions(struct r600_context *rctx)
+{
+	rctx->b.b.create_fs_state = r600_create_ps_state;
+	rctx->b.b.create_vs_state = r600_create_vs_state;
+	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
+	rctx->b.b.bind_blend_state = r600_bind_blend_state;
+	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
+	rctx->b.b.bind_fragment_sampler_states = r600_bind_ps_sampler_states;
+	rctx->b.b.bind_fs_state = r600_bind_ps_state;
+	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
+	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
+	rctx->b.b.bind_vertex_sampler_states = r600_bind_vs_sampler_states;
+	rctx->b.b.bind_vs_state = r600_bind_vs_state;
+	rctx->b.b.delete_blend_state = r600_delete_blend_state;
+	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
+	rctx->b.b.delete_fs_state = r600_delete_ps_state;
+	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
+	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
+	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
+	rctx->b.b.delete_vs_state = r600_delete_vs_state;
+	rctx->b.b.set_blend_color = r600_set_blend_color;
+	rctx->b.b.set_clip_state = r600_set_clip_state;
+	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
+	rctx->b.b.set_sample_mask = r600_set_sample_mask;
+	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
+	rctx->b.b.set_viewport_states = r600_set_viewport_states;
+	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
+	rctx->b.b.set_index_buffer = r600_set_index_buffer;
+	rctx->b.b.set_fragment_sampler_views = r600_set_ps_sampler_views;
+	rctx->b.b.set_vertex_sampler_views = r600_set_vs_sampler_views;
+	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
+	rctx->b.b.texture_barrier = r600_texture_barrier;
+	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
+	rctx->b.b.draw_vbo = r600_draw_vbo;
+}
+
+void r600_trace_emit(struct r600_context *rctx)
+{
+	struct r600_screen *rscreen = rctx->screen;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	uint64_t va;
+	uint32_t reloc;
+
+	va = r600_resource_va(&rscreen->b.b, (void*)rscreen->trace_bo);
+	reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->trace_bo, RADEON_USAGE_READWRITE);
+	radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
+	radeon_emit(cs, va & 0xFFFFFFFFUL);
+	radeon_emit(cs, (va >> 32UL) & 0xFFUL);
+	radeon_emit(cs, cs->cdw);
+	radeon_emit(cs, rscreen->cs_count);
+	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+	radeon_emit(cs, reloc);
+}
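
A note on the new r600_emit_clip_misc_state atom in this diff: fixed-function user clip planes and shader-written clip distances are mutually exclusive, so the six UCP enable bits are only ORed into PA_CL_CLIP_CNTL when the vertex shader writes no clip distances, while PA_CL_VS_OUT_CNTL advertises only the distances that are both requested and actually written. A minimal standalone sketch of that masking, with the bit positions simplified for illustration (the real field encodings are the S_028810_*/S_02881C_* macros in r600d.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the low six bits stand in for the per-plane
 * UCP_ENA fields of PA_CL_CLIP_CNTL. */
static uint32_t clip_cntl_value(uint32_t pa_cl_clip_cntl,
                                uint8_t clip_plane_enable,
                                uint8_t clip_dist_write)
{
	/* Mirrors r600_emit_clip_misc_state: if the VS writes any clip
	 * distances, the fixed-function user clip planes are suppressed. */
	return pa_cl_clip_cntl |
	       (clip_dist_write ? 0 : clip_plane_enable & 0x3F);
}

int main(void)
{
	/* 0x3f: all six UCPs active, nothing written by the shader. */
	printf("0x%02x\n", (unsigned)clip_cntl_value(0, 0x3F, 0x00));
	/* 0x00: shader-written clip distances take over. */
	printf("0x%02x\n", (unsigned)clip_cntl_value(0, 0x3F, 0x0F));
	return 0;
}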
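The 8-bit index path in r600_draw_vbo exists because the VGT only fetches 16- and 32-bit indices, so ubyte index buffers are widened while being copied into the upload buffer. util_shorten_ubyte_elts_to_userptr does the real work; conceptually it reduces to a loop like this (a sketch, not the util/u_index_modify implementation, which also handles index bias and resource mapping):

#include <stdint.h>

/* Widen 8-bit indices to the 16-bit format the hardware accepts.
 * 'start' plays the role of ib.offset in the draw path above. */
static void widen_ubyte_indices(const uint8_t *in, uint16_t *out,
                                unsigned start, unsigned count)
{
	for (unsigned i = 0; i < count; i++)
		out[i] = in[start + i];
}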
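The PKT3_DRAW_INDEX_IMMD branch pads the inline index payload to whole dwords, and the info.count*ib.index_size > 20 test means at most ten 16-bit (or five 32-bit) indices ever take the immediate route; anything larger is uploaded and drawn via DMA. A sketch of the size arithmetic, where align_pot stands in for align() from util/u_math.h:

/* Round v up to a multiple of a power-of-two alignment. */
static unsigned align_pot(unsigned v, unsigned a)
{
	return (v + a - 1) & ~(a - 1);
}

/* Dwords occupied by an immediate index payload. For example,
 * count=5 16-bit indices -> 10 bytes -> 3 dwords, where the last
 * two bytes of the third dword are padding the CP ignores. */
static unsigned immd_payload_dwords(unsigned count, unsigned index_size)
{
	return align_pot(count * index_size, 4) / 4;
}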
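On the r600_draw_rectangle upload: the 24 floats describe three vertices with an eight-float stride, position first and the optional flat color second, which is the layout u_blitter's vertex element state expects. Viewed as a struct (names hypothetical, for illustration only):

/* Hypothetical struct view of the 24 floats uploaded above:
 * three vertices, each a 4-float position plus a 4-float attribute. */
struct rect_vertex {
	float pos[4];    /* x, y, depth, 1.0f */
	float attrib[4]; /* flat color when type is UTIL_BLITTER_ATTRIB_COLOR */
};

/* vb[0..23] is then equivalent to struct rect_vertex v[3], so
 * vb[8] corresponds to v[1].pos[0], vb[12] to v[1].attrib[0], etc. */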
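Finally, r600_trace_emit drops a breadcrumb after each draw when trace_bo is present: PKT3_MEM_WRITE stores the current command-stream offset (cs->cdw) and the screen's submission counter into the trace buffer. After a lockup, reading those two dwords back shows which submission, and roughly which packet, the CP last reached. A hedged sketch of the CPU-side readback, with the slot layout assumed from the order of the two data dwords in the packet above:

#include <stdint.h>
#include <stdio.h>

/* Field order assumed from the MEM_WRITE emission above. */
struct r600_trace_slot {
	uint32_t last_cdw;  /* cs->cdw at the time the breadcrumb executed */
	uint32_t cs_count;  /* which submitted command stream wrote it */
};

static void dump_trace_slot(const struct r600_trace_slot *slot)
{
	printf("GPU last reached cs %u, around dword %u\n",
	       (unsigned)slot->cs_count, (unsigned)slot->last_cdw);
}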