int gs_next_vertex;
struct r600_shader *gs_for_vs;
int gs_export_gpr_treg;
+ unsigned enabled_stream_buffers_mask; /* streamout buffers written, filled by emit_streamout() */
};
struct r600_shader_tgsi_instruction {
* with MEM_STREAM instructions */
output.array_size = 0xFFF;
output.comp_mask = ((1 << so->output[i].num_components) - 1) << so->output[i].start_component;
+
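+ /* Record which streamout buffers this shader writes to. */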
+ ctx->enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer);
+
if (ctx->bc->chip_class >= EVERGREEN) {
switch (so->output[i].output_buffer) {
case 0:
gs->gs_copy_shader = cshader;
ctx.bc->nstack = 1;
+
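+ /* When a GS is active, streamout is done by the GS copy shader, so it carries the buffer mask collected while emitting its streamout instructions. */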
+ cshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
cshader->shader.ring_item_size = ocnt * 16;
return r600_bytecode_build(ctx.bc);
so.num_outputs && !use_llvm)
emit_streamout(&ctx, &so);
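+ /* Keep the buffer mask on the shader variant so the state code can consult it when the shader is bound. */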
+ pipeshader->enabled_stream_buffers_mask = ctx.enabled_stream_buffers_mask;
convert_edgeflag_to_int(&ctx);
if (ring_outputs) {
struct r600_shader_key key;
unsigned db_shader_control;
unsigned ps_depth_export;
+ unsigned enabled_stream_buffers_mask; /* streamout buffers written by this shader */
};
/* return the table index 0-5 for TGSI_INTERPOLATE_LINEAR/PERSPECTIVE and
rctx->clip_misc_state.clip_disable = rctx->gs_shader->current->shader.vs_position_window_space;
rctx->clip_misc_state.atom.dirty = true;
}
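+ /* Streamout is performed by the GS copy shader when a GS is bound. */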
+ rctx->b.streamout.enabled_stream_buffers_mask = rctx->gs_shader->current->gs_copy_shader->enabled_stream_buffers_mask;
}
r600_shader_select(ctx, rctx->vs_shader, &vs_dirty);
rctx->clip_misc_state.clip_disable = rctx->vs_shader->current->shader.vs_position_window_space;
rctx->clip_misc_state.atom.dirty = true;
}
+ rctx->b.streamout.enabled_stream_buffers_mask = rctx->vs_shader->current->enabled_stream_buffers_mask;
}
}
/* External state which comes from the vertex shader,
* it must be set explicitly when binding a shader. */
unsigned *stride_in_dw;
+ unsigned enabled_stream_buffers_mask; /* stream0 buffers 0-3 in the 4 LSBs */
/* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
unsigned hw_enabled_mask;
static void r600_emit_streamout_enable(struct r600_common_context *rctx,
struct r600_atom *atom)
{
- r600_write_context_reg(rctx->rings.gfx.cs,
- rctx->chip_class >= EVERGREEN ?
- R_028B98_VGT_STRMOUT_BUFFER_CONFIG :
- R_028B20_VGT_STRMOUT_BUFFER_EN,
- rctx->streamout.hw_enabled_mask);
-
- r600_write_context_reg(rctx->rings.gfx.cs,
- rctx->chip_class >= EVERGREEN ?
- R_028B94_VGT_STRMOUT_CONFIG :
- R_028AB0_VGT_STRMOUT_EN,
- S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx)));
+ unsigned strmout_config_reg = R_028AB0_VGT_STRMOUT_EN;
+ unsigned strmout_config_val = S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx));
+ unsigned strmout_buffer_reg = R_028B20_VGT_STRMOUT_BUFFER_EN;
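+ /* Enable only the buffers that are both bound and actually written by the current shaders. */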
+ unsigned strmout_buffer_val = rctx->streamout.hw_enabled_mask &
+ rctx->streamout.enabled_stream_buffers_mask;
+
+ if (rctx->chip_class >= EVERGREEN) {
+ strmout_buffer_reg = R_028B98_VGT_STRMOUT_BUFFER_CONFIG;
+
+ strmout_config_reg = R_028B94_VGT_STRMOUT_CONFIG;
+ strmout_config_val |=
+ S_028B94_RAST_STREAM(0) |
+ S_028B94_STREAMOUT_1_EN(r600_get_strmout_en(rctx)) |
+ S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
+ S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx));
+ }
+ r600_write_context_reg(rctx->rings.gfx.cs, strmout_buffer_reg, strmout_buffer_val);
+ r600_write_context_reg(rctx->rings.gfx.cs, strmout_config_reg, strmout_config_val);
}
static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
unsigned old_hw_enabled_mask = rctx->streamout.hw_enabled_mask;
rctx->streamout.streamout_enabled = enable;
- rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask;
+
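+ /* The hw mask has one 4-bit buffer-enable field per stream (Evergreen+); replicate the bound-buffer mask into all four fields and let enabled_stream_buffers_mask filter it at emit time. */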
+ rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask |
+ (rctx->streamout.enabled_mask << 4) |
+ (rctx->streamout.enabled_mask << 8) |
+ (rctx->streamout.enabled_mask << 12);
+
if ((old_strmout_en != r600_get_strmout_en(rctx)) ||
(old_hw_enabled_mask != rctx->streamout.hw_enabled_mask))
rctx->streamout.enable_atom.dirty = true;
si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
+static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
+{
+ struct pipe_stream_output_info *so = &shader->so;
+ uint32_t enabled_stream_buffers_mask = 0;
+ int i;
+
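+ /* Collect the streamout buffers written by this shader, one bit per buffer. */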
+ for (i = 0; i < so->num_outputs; i++)
+ enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer);
+ sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
+ sctx->b.streamout.stride_in_dw = shader->so.stride;
+}
+
void si_update_shaders(struct si_context *sctx)
{
struct pipe_context *ctx = (struct pipe_context*)sctx;
} else {
/* TES as VS */
si_pm4_bind_state(sctx, vs, sctx->tes_shader->current->pm4);
- sctx->b.streamout.stride_in_dw = sctx->tes_shader->so.stride;
+ si_update_so(sctx, sctx->tes_shader);
}
} else if (sctx->gs_shader) {
/* VS as ES */
/* VS as VS */
si_shader_select(ctx, sctx->vs_shader);
si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
- sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;
+ si_update_so(sctx, sctx->vs_shader);
}
/* Update GS. */
si_shader_select(ctx, sctx->gs_shader);
si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);
- sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;
+ si_update_so(sctx, sctx->gs_shader);
if (!sctx->gs_rings)
si_init_gs_rings(sctx);