rstate = &blend->rstate;
rctx->states[rstate->id] = rstate;
rctx->dual_src_blend = blend->dual_src_blend;
+ rctx->alpha_to_one = blend->alpha_to_one;
r600_context_pipe_state_set(rctx, rstate);
if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
rctx->two_side = rs->two_side;
rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
+ rctx->multisample_enable = rs->multisample_enable;
rctx->rasterizer = rs;
FREE(resource);
}
+/* Shared VS/PS sampler-state bind path.
+ *
+ * Copies the `count` sampler CSO pointers from `states` into `dst`,
+ * recomputes the dword size of the per-shader sampler atom (11 dwords for
+ * a sampler that uses a border color, 5 otherwise), and dirties the atom
+ * when any sampler state will be emitted.  Border-color samplers also
+ * request a partial pipeline flush via rctx->flags.
+ *
+ * NOTE(review): `seamless_cube_map` ends up holding the setting of the
+ * *last* non-NULL sampler in the list; this presumably assumes all bound
+ * samplers agree on the value -- confirm against the state tracker.
+ */
+static void r600_bind_samplers(struct r600_context *rctx,
+			       struct r600_textures_info *dst,
+			       unsigned count, void **states)
+{
+	/* -1 == "no non-NULL sampler seen": leave the seamless state alone. */
+	int seamless_cube_map = -1;
+	unsigned i;
+
+	memcpy(dst->samplers, states, sizeof(void*) * count);
+	dst->n_samplers = count;
+	dst->atom_sampler.num_dw = 0;
+
+	for (i = 0; i < count; i++) {
+		struct r600_pipe_sampler_state *sampler = states[i];
+
+		if (sampler == NULL) {
+			continue;
+		}
+		if (sampler->border_color_use) {
+			/* Border-color samplers emit extra registers and need
+			 * the pipeline flushed before they can be updated. */
+			dst->atom_sampler.num_dw += 11;
+			rctx->flags |= R600_PARTIAL_FLUSH;
+		} else {
+			dst->atom_sampler.num_dw += 5;
+		}
+		seamless_cube_map = sampler->seamless_cube_map;
+	}
+	/* Seamless cube-map filtering is a global toggle on pre-Evergreen. */
+	if (rctx->chip_class <= R700 && seamless_cube_map != -1 && seamless_cube_map != rctx->seamless_cube_map.enabled) {
+		/* change in TA_CNTL_AUX need a pipeline flush */
+		rctx->flags |= R600_PARTIAL_FLUSH;
+		rctx->seamless_cube_map.enabled = seamless_cube_map;
+		r600_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
+	}
+	if (dst->atom_sampler.num_dw) {
+		r600_atom_dirty(rctx, &dst->atom_sampler);
+	}
+}
+
+/* pipe_context hook: bind `count` sampler states for the vertex shader.
+ * Thin wrapper dispatching to the shared bind path with the VS slot. */
+void r600_bind_vs_samplers(struct pipe_context *ctx, unsigned count, void **states)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	r600_bind_samplers(rctx, &rctx->vs_samplers, count, states);
+}
+
+/* pipe_context hook: bind `count` sampler states for the fragment shader.
+ * Thin wrapper dispatching to the shared bind path with the PS slot. */
+void r600_bind_ps_samplers(struct pipe_context *ctx, unsigned count, void **states)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	r600_bind_samplers(rctx, &rctx->ps_samplers, count, states);
+}
+
+/* pipe_context hook: destroy a sampler CSO.  The state is a plain
+ * allocation with no GPU resources attached, so free() suffices. */
+void r600_delete_sampler(struct pipe_context *ctx, void *state)
+{
+	free(state);
+}
+
void r600_delete_state(struct pipe_context *ctx, void *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
if (rctx->chip_class <= R700 &&
(rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
- dst->samplers_dirty = true;
+ r600_atom_dirty(rctx, &dst->atom_sampler);
}
pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
if (sel->type == PIPE_SHADER_FRAGMENT) {
key = rctx->two_side |
- MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 1;
+ ((rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer) << 1) |
+ (MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 2);
} else
key = 0;
rctx->streamout_append_bitmask = append_bitmask;
}
+/* pipe_context hook: set the MSAA sample mask.
+ * No-op when the (16-bit-truncated) mask is unchanged; otherwise store it
+ * and dirty the sample-mask atom so it is re-emitted on the next draw.
+ * NOTE(review): the comparison truncates to uint16_t -- assumes the
+ * sample_mask.sample_mask field itself is 16 bits wide; confirm. */
+void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
+{
+	struct r600_context *rctx = (struct r600_context*)pipe;
+
+	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
+		return;
+
+	rctx->sample_mask.sample_mask = sample_mask;
+	r600_atom_dirty(rctx, &rctx->sample_mask.atom);
+}
+
static void r600_update_derived_state(struct r600_context *rctx)
{
struct pipe_context * ctx = (struct pipe_context*)rctx;
}
}
- if (rctx->chip_class < EVERGREEN) {
- r600_update_sampler_states(rctx);
- }
-
r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);
if (rctx->ps_shader && ((rctx->sprite_coord_enable &&
r600_update_derived_state(rctx);
+ /* partial flush triggered by border color change */
+ if (rctx->flags & R600_PARTIAL_FLUSH) {
+ rctx->flags &= ~R600_PARTIAL_FLUSH;
+ r600_write_value(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ r600_write_value(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
if (info.indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
r600_context_pipe_state_set(rctx, &rctx->vgt);
+ /* Enable stream out if needed. */
+ if (rctx->streamout_start) {
+ r600_context_streamout_begin(rctx);
+ rctx->streamout_start = FALSE;
+ }
+
/* Emit states (the function expects that we emit at most 17 dwords here). */
- r600_need_cs_space(rctx,
- !info.indexed && info.count_from_stream_output ? 14 : 0,
- TRUE);
+ r600_need_cs_space(rctx, 0, TRUE);
LIST_FOR_EACH_ENTRY_SAFE(state, next_state, &rctx->dirty_states, head) {
r600_emit_atom(rctx, state);
}
rctx->pm4_dirty_cdwords = 0;
- /* Enable stream out if needed. */
- if (rctx->streamout_start) {
- r600_context_streamout_begin(rctx);
- rctx->streamout_start = FALSE;
- }
-
/* draw packet */
- cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
- cs->buf[cs->cdw++] = ib.index_size == 4 ?
- (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
- (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
cs->buf[cs->cdw++] = info.instance_count;
if (info.indexed) {
+ cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
+ cs->buf[cs->cdw++] = ib.index_size == 4 ?
+ (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
+ (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
+
va = r600_resource_va(ctx->screen, ib.buffer);
va += ib.offset;
cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
uint64_t va = r600_resource_va(&rctx->screen->screen, (void*)t->filled_size);
- r600_write_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);