(struct pipe_stream_output_target**)sctx->b.streamout.targets);
if (op & SI_SAVE_FRAMEBUFFER)
- util_blitter_save_framebuffer(sctx->blitter, &sctx->framebuffer);
+ util_blitter_save_framebuffer(sctx->blitter, &sctx->framebuffer.state);
if (op & SI_SAVE_TEXTURES) {
util_blitter_save_fragment_sampler_states(
double depth, unsigned stencil)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct pipe_framebuffer_state *fb = &sctx->framebuffer;
+ struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
si_blitter_begin(ctx, SI_CLEAR);
util_blitter_clear(sctx->blitter, fb->width, fb->height,
sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_inplace);
sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
- util_unreference_framebuffer_state(&sctx->framebuffer);
+ util_unreference_framebuffer_state(&sctx->framebuffer.state);
util_blitter_destroy(sctx->blitter);
unsigned n_samplers;
};
+struct si_framebuffer {
+ struct pipe_framebuffer_state state;
+ unsigned log_samples; /* log2 of the framebuffer sample count */
+ unsigned cb0_is_integer; /* colorbuffer 0 has a pure integer format */
+ unsigned compressed_cb_mask; /* colorbuffers with FMASK or CMASK allocated */
+ unsigned export_16bpc; /* colorbuffers using 16-bits-per-channel exports */
+};
+
#define SI_NUM_ATOMS(sctx) (sizeof((sctx)->atoms)/sizeof((sctx)->atoms.array[0]))
#define SI_NUM_SHADERS (PIPE_SHADER_GEOMETRY+1)
struct r600_atom *array[0];
} atoms;
+ struct si_framebuffer framebuffer;
struct si_vertex_element *vertex_elements;
- struct pipe_framebuffer_state framebuffer;
- unsigned fb_log_samples;
- unsigned fb_cb0_is_integer;
- unsigned fb_compressed_cb_mask;
unsigned pa_sc_line_stipple;
unsigned pa_su_sc_mode_cntl;
/* for saving when using blitter */
struct si_cs_shader_state cs_shader_state;
/* shader information */
unsigned sprite_coord_enable;
- unsigned export_16bpc;
struct si_buffer_resources const_buffers[SI_NUM_SHADERS];
struct si_buffer_resources rw_buffers[SI_NUM_SHADERS];
struct si_textures_info samplers[SI_NUM_SHADERS];
if (pm4 == NULL)
return;
- mask = (1ULL << ((unsigned)sctx->framebuffer.nr_cbufs * 4)) - 1;
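+ /* One nibble (4 write-mask bits) per bound colorbuffer. */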
+ mask = (1ULL << ((unsigned)sctx->framebuffer.state.nr_cbufs * 4)) - 1;
mask &= blend->cb_target_mask;
si_pm4_set_reg(pm4, R_028238_CB_TARGET_MASK, mask);
struct si_pm4_state *pm4;
float offset_units;
- if (!rs || !sctx->framebuffer.zsbuf)
+ if (!rs || !sctx->framebuffer.state.zsbuf)
return;
offset_units = sctx->queued.named.rasterizer->offset_units;
- switch (sctx->framebuffer.zsbuf->texture->format) {
+ switch (sctx->framebuffer.state.zsbuf->texture->format) {
case PIPE_FORMAT_S8_UINT_Z24_UNORM:
case PIPE_FORMAT_X8Z24_UNORM:
case PIPE_FORMAT_Z24X8_UNORM:
uint64_t z_offs, s_offs;
uint32_t db_htile_data_base, db_htile_surface, pa_su_poly_offset_db_fmt_cntl;
- switch (sctx->framebuffer.zsbuf->texture->format) {
+ switch (sctx->framebuffer.state.zsbuf->texture->format) {
case PIPE_FORMAT_S8_UINT_Z24_UNORM:
case PIPE_FORMAT_X8Z24_UNORM:
case PIPE_FORMAT_Z24X8_UNORM:
if (pm4 == NULL)
return;
- if (sctx->framebuffer.nr_cbufs) {
+ if (sctx->framebuffer.state.nr_cbufs) {
sctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_CB_META;
}
- if (sctx->framebuffer.zsbuf) {
+ if (sctx->framebuffer.state.zsbuf) {
sctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
}
- util_copy_framebuffer_state(&sctx->framebuffer, state);
+ util_copy_framebuffer_state(&sctx->framebuffer.state, state);
/* build states */
- sctx->export_16bpc = 0;
- sctx->fb_compressed_cb_mask = 0;
+ sctx->framebuffer.export_16bpc = 0;
+ sctx->framebuffer.compressed_cb_mask = 0;
for (i = 0; i < state->nr_cbufs; i++) {
if (!state->cbufs[i]) {
}
if (surf->export_16bpc) {
- sctx->export_16bpc |= 1 << i;
+ sctx->framebuffer.export_16bpc |= 1 << i;
}
if (rtex->fmask.size || rtex->cmask.size) {
- sctx->fb_compressed_cb_mask |= 1 << i;
+ sctx->framebuffer.compressed_cb_mask |= 1 << i;
}
si_pm4_add_bo(pm4, &rtex->resource, RADEON_USAGE_READWRITE,
si_pm4_set_reg(pm4, R_028C70_CB_COLOR0_INFO + 1 * 0x3C, surf->cb_color_info);
/* Also set the 16BPC export. */
if (surf->export_16bpc) {
- sctx->export_16bpc |= 1 << 1;
+ sctx->framebuffer.export_16bpc |= 1 << 1;
}
i++;
}
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
}
- assert(!(sctx->export_16bpc & ~0xff));
+ assert(!(sctx->framebuffer.export_16bpc & ~0xff));
if (state->zsbuf) {
surf = (struct r600_surface*)state->zsbuf;
nr_samples = util_framebuffer_get_num_samples(state);
si_set_msaa_state(sctx, pm4, nr_samples);
- sctx->fb_log_samples = util_logbase2(nr_samples);
- sctx->fb_cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
+ sctx->framebuffer.log_samples = util_logbase2(nr_samples);
+ sctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
util_format_is_pure_integer(state->cbufs[0]->format);
si_pm4_set_state(sctx, framebuffer, pm4);
key->vs.as_es = sctx->gs_shader != NULL;
} else if (sel->type == PIPE_SHADER_FRAGMENT) {
if (sel->fs_write_all)
- key->ps.nr_cbufs = sctx->framebuffer.nr_cbufs;
- key->ps.export_16bpc = sctx->export_16bpc;
+ key->ps.nr_cbufs = sctx->framebuffer.state.nr_cbufs;
+ key->ps.export_16bpc = sctx->framebuffer.export_16bpc;
if (sctx->queued.named.rasterizer) {
key->ps.color_two_side = sctx->queued.named.rasterizer->two_side;
if (sctx->queued.named.blend) {
key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
sctx->queued.named.rasterizer->multisample_enable &&
- !sctx->fb_cb0_is_integer;
+ !sctx->framebuffer.cb0_is_integer;
}
}
if (sctx->queued.named.dsa) {
key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
/* Alpha-test should be disabled if colorbuffer 0 is integer. */
- if (sctx->framebuffer.nr_cbufs &&
- sctx->framebuffer.cbufs[0] &&
- util_format_is_pure_integer(sctx->framebuffer.cbufs[0]->texture->format))
+ if (sctx->framebuffer.cb0_is_integer)
key->ps.alpha_func = PIPE_FUNC_ALWAYS;
} else {
key->ps.alpha_func = PIPE_FUNC_ALWAYS;
return;
db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
- S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);
+ S_02880C_ALPHA_TO_MASK_DISABLE(sctx->framebuffer.cb0_is_integer);
for (i = 0; i < shader->shader.ninput; i++) {
switch (shader->shader.input[i].name) {
si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
- shader->cb0_is_integer = sctx->fb_cb0_is_integer;
+ shader->cb0_is_integer = sctx->framebuffer.cb0_is_integer;
shader->sprite_coord_enable = sctx->sprite_coord_enable;
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
si_shader_select(ctx, sctx->ps_shader);
if (!sctx->ps_shader->current->pm4 ||
- sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer)
+ sctx->ps_shader->current->cb0_is_integer != sctx->framebuffer.cb0_is_integer)
si_pipe_shader_ps(ctx, sctx->ps_shader->current);
si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
if (sctx->b.chip_class >= CIK) {
si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
+ S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples) |
S_028004_ZPASS_ENABLE(1) |
S_028004_SLICE_EVEN_ENABLE(1) |
S_028004_SLICE_ODD_ENABLE(1));
} else {
si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->fb_log_samples));
+ S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples));
}
}
#endif
/* Set the depth buffer as dirty. */
- if (sctx->framebuffer.zsbuf) {
- struct pipe_surface *surf = sctx->framebuffer.zsbuf;
+ if (sctx->framebuffer.state.zsbuf) {
+ struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
struct r600_texture *rtex = (struct r600_texture *)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
}
- if (sctx->fb_compressed_cb_mask) {
+ if (sctx->framebuffer.compressed_cb_mask) {
struct pipe_surface *surf;
struct r600_texture *rtex;
- unsigned mask = sctx->fb_compressed_cb_mask;
+ unsigned mask = sctx->framebuffer.compressed_cb_mask;
do {
unsigned i = u_bit_scan(&mask);
- surf = sctx->framebuffer.cbufs[i];
+ surf = sctx->framebuffer.state.cbufs[i];
rtex = (struct r600_texture*)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;