atom->flags = flags;
}
+/* Emit the alpha-test state atom: writes SX_ALPHA_TEST_CONTROL (with the
+ * bypass bit folded in) and SX_ALPHA_REF to the command stream. */
+static void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = rctx->cs;
+ struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
+ unsigned alpha_ref = a->sx_alpha_ref;
+
+ if (rctx->chip_class >= EVERGREEN && a->cb0_export_16bpc) {
+ /* NOTE(review): drops the low 13 bits of the reference value when
+  * CB0 exports 16bpc on Evergreen+ — presumably to match the reduced
+  * export precision; confirm against the hw register docs. */
+ alpha_ref &= ~0x1FFF;
+ }
+
+ r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
+ a->sx_alpha_test_control |
+ S_028410_ALPHA_TEST_BYPASS(a->bypass));
+ r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
+}
+
+/* Register the hardware-state atoms shared by the r600 and evergreen paths. */
void r600_init_common_atoms(struct r600_context *rctx)
{
r600_init_atom(&rctx->surface_sync_cmd.atom, r600_emit_surface_sync, 5, EMIT_EARLY);
r600_init_atom(&rctx->r6xx_flush_and_inv_cmd, r600_emit_r6xx_flush_and_inv, 2, EMIT_EARLY);
+ /* NOTE(review): num_dw is 3 here, but r600_emit_alphatest_state writes
+  * two context registers — verify 3 dwords is really enough for both. */
+ r600_init_atom(&rctx->alphatest_state.atom, r600_emit_alphatest_state, 3, 0);
+ /* Make sure the alpha-test registers are emitted on the first draw. */
+ r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
}
unsigned r600_get_cb_flush_flags(struct r600_context *rctx)
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
struct r600_pipe_state *rstate;
+ bool update_cb = false;
if (state == NULL)
return;
rstate = &blend->rstate;
rctx->states[rstate->id] = rstate;
rctx->dual_src_blend = blend->dual_src_blend;
+ rctx->alpha_to_one = blend->alpha_to_one;
r600_context_pipe_state_set(rctx, rstate);
if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
- r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+ update_cb = true;
}
if (rctx->chip_class <= R700 &&
rctx->cb_misc_state.cb_color_control != blend->cb_color_control) {
rctx->cb_misc_state.cb_color_control = blend->cb_color_control;
+ update_cb = true;
+ }
+ if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
+ rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
+ update_cb = true;
+ }
+ if (update_cb) {
r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
}
return;
rstate = &dsa->rstate;
rctx->states[rstate->id] = rstate;
- rctx->sx_alpha_test_control &= ~0xff;
- rctx->sx_alpha_test_control |= dsa->sx_alpha_test_control;
- rctx->alpha_ref = dsa->alpha_ref;
- rctx->alpha_ref_dirty = true;
r600_context_pipe_state_set(rctx, rstate);
ref.ref_value[0] = rctx->stencil_ref.ref_value[0];
r600_set_stencil_ref(ctx, &ref);
- if (rctx->db_misc_state.flush_depthstencil_enabled != dsa->is_flush) {
- rctx->db_misc_state.flush_depthstencil_enabled = dsa->is_flush;
- r600_atom_dirty(rctx, &rctx->db_misc_state.atom);
+ /* Update alphatest state. */
+ if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
+ rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
+ rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
+ rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
+ r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
}
}
rctx->two_side = rs->two_side;
rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
+ rctx->multisample_enable = rs->multisample_enable;
rctx->rasterizer = rs;
FREE(resource);
}
+/* Common helper for the VS/PS sampler bind hooks: store the sampler CSO
+ * pointers for one shader stage in 'dst' and recompute the dword size of
+ * that stage's sampler atom.  NULL entries in 'states' are allowed and
+ * contribute nothing to the atom. */
+static void r600_bind_samplers(struct r600_context *rctx,
+    struct r600_textures_info *dst,
+    unsigned count, void **states)
+{
+ int seamless_cube_map = -1;
+ unsigned i;
+
+ memcpy(dst->samplers, states, sizeof(void*) * count);
+ dst->n_samplers = count;
+ dst->atom_sampler.num_dw = 0;
+
+ for (i = 0; i < count; i++) {
+ struct r600_pipe_sampler_state *sampler = states[i];
+
+ if (sampler == NULL) {
+ continue;
+ }
+ if (sampler->border_color_use) {
+ /* Border-color samplers take 11 dwords instead of 5, and
+  * updating the border color requires a pipeline flush. */
+ dst->atom_sampler.num_dw += 11;
+ rctx->flags |= R600_PARTIAL_FLUSH;
+ } else {
+ dst->atom_sampler.num_dw += 5;
+ }
+ /* NOTE(review): only the last non-NULL sampler's setting survives;
+  * presumably all bound samplers agree on seamless_cube_map — confirm. */
+ seamless_cube_map = sampler->seamless_cube_map;
+ }
+ if (rctx->chip_class <= R700 && seamless_cube_map != -1 && seamless_cube_map != rctx->seamless_cube_map.enabled) {
+ /* change in TA_CNTL_AUX need a pipeline flush */
+ rctx->flags |= R600_PARTIAL_FLUSH;
+ rctx->seamless_cube_map.enabled = seamless_cube_map;
+ r600_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
+ }
+ if (dst->atom_sampler.num_dw) {
+ r600_atom_dirty(rctx, &dst->atom_sampler);
+ }
+}
+
+/* Bind sampler states for the vertex-shader stage. */
+void r600_bind_vs_samplers(struct pipe_context *ctx, unsigned count, void **states)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ r600_bind_samplers(rctx, &rctx->vs_samplers, count, states);
+}
+
+/* Bind sampler states for the pixel-shader stage. */
+void r600_bind_ps_samplers(struct pipe_context *ctx, unsigned count, void **states)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ r600_bind_samplers(rctx, &rctx->ps_samplers, count, states);
+}
+
+/* Destroy a sampler-state CSO.  The context argument is unused. */
+void r600_delete_sampler(struct pipe_context *ctx, void *state)
+{
+ free(state);
+}
+
void r600_delete_state(struct pipe_context *ctx, void *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
}
}
+/* Mark the vertex-buffer atom dirty, sized only for the buffers whose
+ * dirty bit is set (12 dwords each on Evergreen+, 11 on r600/r700).
+ * No-op when nothing is dirty. */
+void r600_vertex_buffers_dirty(struct r600_context *rctx)
+{
+ if (rctx->vertex_buffer_state.dirty_mask) {
+ r600_inval_vertex_cache(rctx);
+ rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) *
+ util_bitcount(rctx->vertex_buffer_state.dirty_mask);
+ r600_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
+ }
+}
+
+/* pipe_context::set_vertex_buffers: diff the incoming buffer set against
+ * the cached one, reference-count the pipe_resources, and maintain the
+ * enabled/dirty slot bitmasks so only changed slots are re-emitted. */
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
- const struct pipe_vertex_buffer *buffers)
+ const struct pipe_vertex_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
+ struct pipe_vertex_buffer *vb = state->vb;
+ unsigned i;
+ /* This sets 1-bit for buffers with index >= count. */
+ uint32_t disable_mask = ~((1ull << count) - 1);
+ /* These are the new buffers set by this function. */
+ uint32_t new_buffer_mask = 0;
+
+ /* Set buffers with index >= count to NULL. */
+ uint32_t remaining_buffers_mask =
+ rctx->vertex_buffer_state.enabled_mask & disable_mask;
+
+ while (remaining_buffers_mask) {
+ i = u_bit_scan(&remaining_buffers_mask);
+ pipe_resource_reference(&vb[i].buffer, NULL);
+ }
+
+ /* Set vertex buffers. */
+ for (i = 0; i < count; i++) {
+ /* Only touch slots whose contents actually changed. */
+ if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
+ if (input[i].buffer) {
+ vb[i].stride = input[i].stride;
+ vb[i].buffer_offset = input[i].buffer_offset;
+ pipe_resource_reference(&vb[i].buffer, input[i].buffer);
+ new_buffer_mask |= 1 << i;
+ } else {
+ pipe_resource_reference(&vb[i].buffer, NULL);
+ disable_mask |= 1 << i;
+ }
+ }
+ }
- util_copy_vertex_buffers(rctx->vertex_buffer, &rctx->nr_vertex_buffers, buffers, count);
+ /* Newly-set slots become enabled and dirty; disabled slots drop out of
+  * both masks. */
+ rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
+ rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
+ rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
+ rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;
- r600_inval_vertex_cache(rctx);
- rctx->vertex_buffer_state.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 10) *
-    rctx->nr_vertex_buffers;
- r600_atom_dirty(rctx, &rctx->vertex_buffer_state);
+ r600_vertex_buffers_dirty(rctx);
+}
+
+/* Mark a stage's sampler-view atom dirty, sized per dirty view
+ * (14 dwords each on Evergreen+, 13 on r600/r700).  No-op when
+ * nothing is dirty. */
+void r600_sampler_views_dirty(struct r600_context *rctx,
+    struct r600_samplerview_state *state)
+{
+ if (state->dirty_mask) {
+ r600_inval_texture_cache(rctx);
+ state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) *
+ util_bitcount(state->dirty_mask);
+ r600_atom_dirty(rctx, &state->atom);
+ }
+}
+
+/* Common helper for the VS/PS sampler-view set hooks: reference the new
+ * views into dst->views, maintain the enabled/dirty/depth-texture slot
+ * bitmasks, and dirty the sampler atom when an array/non-array texture
+ * switch requires a TEX_ARRAY_OVERRIDE update on r6xx/r7xx. */
+void r600_set_sampler_views(struct r600_context *rctx,
+    struct r600_textures_info *dst,
+    unsigned count,
+    struct pipe_sampler_view **views)
+{
+ struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
+ unsigned i;
+ /* This sets 1-bit for textures with index >= count. */
+ uint32_t disable_mask = ~((1ull << count) - 1);
+ /* These are the new textures set by this function. */
+ uint32_t new_mask = 0;
+
+ /* Set textures with index >= count to NULL. */
+ uint32_t remaining_mask = dst->views.enabled_mask & disable_mask;
+
+ while (remaining_mask) {
+ i = u_bit_scan(&remaining_mask);
+ assert(dst->views.views[i]);
+
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
+ }
+
+ for (i = 0; i < count; i++) {
+ /* Unchanged slots keep their existing reference. */
+ if (rviews[i] == dst->views.views[i]) {
+ continue;
+ }
+
+ if (rviews[i]) {
+ struct r600_resource_texture *rtex =
+ (struct r600_resource_texture*)rviews[i]->base.texture;
+
+ /* Track which bound views are unflushed depth textures; this
+  * mask drives r600_flush_depth_textures before drawing. */
+ if (rtex->is_depth && !rtex->is_flushing_texture) {
+ dst->views.depth_texture_mask |= 1 << i;
+ } else {
+ dst->views.depth_texture_mask &= ~(1 << i);
+ }
+
+ /* Changing from array to non-arrays textures and vice
+ * versa requires updating TEX_ARRAY_OVERRIDE on R6xx-R7xx. */
+ if (rctx->chip_class <= R700 &&
+ (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
+ rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
+ r600_atom_dirty(rctx, &dst->atom_sampler);
+ }
+
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
+ new_mask |= 1 << i;
+ } else {
+ pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
+ disable_mask |= 1 << i;
+ }
+ }
+
+ dst->views.enabled_mask &= ~disable_mask;
+ dst->views.dirty_mask &= dst->views.enabled_mask;
+ dst->views.enabled_mask |= new_mask;
+ dst->views.dirty_mask |= new_mask;
+ /* Depth-texture bits only make sense for slots that stay enabled. */
+ dst->views.depth_texture_mask &= dst->views.enabled_mask;
+
+ r600_sampler_views_dirty(rctx, &dst->views);
void *r600_create_vertex_elements(struct pipe_context *ctx,
if (sel->type == PIPE_SHADER_FRAGMENT) {
key = rctx->two_side |
- MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 1;
+ ((rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer) << 1) |
+ (MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 2);
} else
key = 0;
r600_adjust_gprs(rctx);
}
+ if (rctx->ps_shader &&
+ rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
+ rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
+ r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+ }
return 0;
}
if (rctx->vs_shader)
r600_adjust_gprs(rctx);
}
+
+ if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
+ rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
+ r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
+ }
}
void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
r600_delete_shader_selector(ctx, sel);
}
-static void r600_update_alpha_ref(struct r600_context *rctx)
-{
- unsigned alpha_ref;
- struct r600_pipe_state rstate;
-
- alpha_ref = rctx->alpha_ref;
- rstate.nregs = 0;
- if (rctx->export_16bpc && rctx->chip_class >= EVERGREEN) {
- alpha_ref &= ~0x1FFF;
- }
- r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref);
-
- r600_context_pipe_state_set(rctx, &rstate);
- rctx->alpha_ref_dirty = false;
-}
-
+/* Mark the constant-buffer atom dirty, sized per dirty buffer (20 dwords
+ * each on Evergreen+, 19 on r600/r700).  The dirty_mask guard avoids a
+ * needless shader-cache invalidation when nothing changed. */
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
- r600_inval_shader_cache(rctx);
- state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
-    : util_bitcount(state->dirty_mask)*19;
- r600_atom_dirty(rctx, &state->atom);
+ if (state->dirty_mask) {
+ r600_inval_shader_cache(rctx);
+ state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
+ : util_bitcount(state->dirty_mask)*19;
+ r600_atom_dirty(rctx, &state->atom);
+ }
}
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
rctx->streamout_append_bitmask = append_bitmask;
}
+/* pipe_context::set_sample_mask hook: store the new mask and dirty the
+ * sample-mask atom.  No-op if the mask is unchanged. */
+void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
+{
+ struct r600_context *rctx = (struct r600_context*)pipe;
+
+ /* Only the low 16 bits are stored, so compare after truncation. */
+ if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
+ return;
+
+ rctx->sample_mask.sample_mask = sample_mask;
+ r600_atom_dirty(rctx, &rctx->sample_mask.atom);
+}
+
static void r600_update_derived_state(struct r600_context *rctx)
{
struct pipe_context * ctx = (struct pipe_context*)rctx;
unsigned ps_dirty = 0;
if (!rctx->blitter->running) {
- if (rctx->have_depth_fb || rctx->have_depth_texture)
- r600_flush_depth_textures(rctx);
- }
-
- if (rctx->chip_class < EVERGREEN) {
- r600_update_sampler_states(rctx);
+ /* Flush depth textures which need to be flushed. */
+ if (rctx->vs_samplers.views.depth_texture_mask) {
+ r600_flush_depth_textures(rctx, &rctx->vs_samplers.views);
+ }
+ if (rctx->ps_samplers.views.depth_texture_mask) {
+ r600_flush_depth_textures(rctx, &rctx->ps_samplers.views);
+ }
}
r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);
- if (rctx->alpha_ref_dirty) {
- r600_update_alpha_ref(rctx);
- }
-
if (rctx->ps_shader && ((rctx->sprite_coord_enable &&
(rctx->ps_shader->current->sprite_coord_enable != rctx->sprite_coord_enable)) ||
(rctx->rasterizer && rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) {
} else {
r600_update_dual_export_state(rctx);
}
-
- if (rctx->dual_src_blend) {
- rctx->cb_shader_mask = rctx->ps_shader->current->ps_cb_shader_mask | rctx->fb_cb_shader_mask;
- } else {
- rctx->cb_shader_mask = rctx->fb_cb_shader_mask;
- }
}
static unsigned r600_conv_prim_to_gs_out(unsigned mode)
r600_update_derived_state(rctx);
+ /* partial flush triggered by border color change */
+ if (rctx->flags & R600_PARTIAL_FLUSH) {
+ rctx->flags &= ~R600_PARTIAL_FLUSH;
+ r600_write_value(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ r600_write_value(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
if (info.indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
}
} else {
info.index_bias = info.start;
- if (info.count_from_stream_output) {
- r600_context_draw_opaque_count(rctx, (struct r600_so_target*)info.count_from_stream_output);
- }
}
if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
rctx->vgt.nregs = 0;
r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim);
r600_pipe_state_add_reg(&rctx->vgt, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 0);
- r600_pipe_state_add_reg(&rctx->vgt, R_02823C_CB_SHADER_MASK, 0);
r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias);
r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index);
- r600_pipe_state_add_reg(&rctx->vgt, R_028410_SX_ALPHA_TEST_CONTROL, 0);
r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart);
r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE, 0);
rctx->vgt.nregs = 0;
r600_pipe_state_mod_reg(&rctx->vgt, prim);
r600_pipe_state_mod_reg(&rctx->vgt, r600_conv_prim_to_gs_out(info.mode));
- r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_shader_mask);
r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
- r600_pipe_state_mod_reg(&rctx->vgt, rctx->sx_alpha_test_control);
r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);
if (prim == V_008958_DI_PT_LINELIST)
ls_mask = 1;
- else if (prim == V_008958_DI_PT_LINESTRIP)
+ else if (prim == V_008958_DI_PT_LINESTRIP ||
+ prim == V_008958_DI_PT_LINELOOP)
ls_mask = 2;
r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask) | rctx->pa_sc_line_stipple);
r600_pipe_state_mod_reg(&rctx->vgt,
r600_context_pipe_state_set(rctx, &rctx->vgt);
+ /* Enable stream out if needed. */
+ if (rctx->streamout_start) {
+ r600_context_streamout_begin(rctx);
+ rctx->streamout_start = FALSE;
+ }
+
/* Emit states (the function expects that we emit at most 17 dwords here). */
r600_need_cs_space(rctx, 0, TRUE);
LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty,list) {
r600_context_block_emit_dirty(rctx, dirty_block, 0 /* pkt_flags */);
}
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->resource_dirty,list) {
- r600_context_block_resource_emit_dirty(rctx, dirty_block);
- }
rctx->pm4_dirty_cdwords = 0;
- /* Enable stream out if needed. */
- if (rctx->streamout_start) {
- r600_context_streamout_begin(rctx);
- rctx->streamout_start = FALSE;
- }
-
/* draw packet */
- cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
- cs->buf[cs->cdw++] = ib.index_size == 4 ?
- (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
- (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
cs->buf[cs->cdw++] = info.instance_count;
if (info.indexed) {
+ cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
+ cs->buf[cs->cdw++] = ib.index_size == 4 ?
+ (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
+ (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
+
va = r600_resource_va(ctx->screen, ib.buffer);
va += ib.offset;
cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
} else {
+ if (info.count_from_stream_output) {
+ struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
+ uint64_t va = r600_resource_va(&rctx->screen->screen, (void*)t->filled_size);
+
+ r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
+
+ cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
+ cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
+ cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
+ cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
+ cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
+ cs->buf[cs->cdw++] = 0; /* unused */
+
+ cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
+ cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, t->filled_size, RADEON_USAGE_READ);
+ }
+
cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing);
cs->buf[cs->cdw++] = info.count;
cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;
- if (rctx->framebuffer.zsbuf)
- {
- struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
- ((struct r600_resource_texture *)tex)->dirty_db = TRUE;
+ /* Set the depth buffer as dirty. */
+ if (rctx->framebuffer.zsbuf) {
+ struct pipe_surface *surf = rctx->framebuffer.zsbuf;
+ struct r600_resource_texture *rtex = (struct r600_resource_texture *)surf->texture;
+
+ rtex->dirty_db_mask |= 1 << surf->u.tex.level;
}
pipe_resource_reference(&ib.buffer, NULL);