* non-global VS user SGPRs. */
sctx->shader_pointers_dirty |= SI_DESCS_SHADER_MASK(VERTEX);
sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
}
static unsigned u_max_sample(struct pipe_resource *r)
sctx->dbcb_depth_copy_enabled = true;
if (planes & PIPE_MASK_S)
sctx->dbcb_stencil_copy_enabled = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
assert(sctx->dbcb_depth_copy_enabled || sctx->dbcb_stencil_copy_enabled);
for (sample = first_sample; sample <= last_sample; sample++) {
if (sample != sctx->dbcb_copy_sample) {
sctx->dbcb_copy_sample = sample;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
si_blitter_begin(sctx, SI_DECOMPRESS);
sctx->decompression_enabled = false;
sctx->dbcb_depth_copy_enabled = false;
sctx->dbcb_stencil_copy_enabled = false;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
return fully_copied_levels;
}
sctx->db_flush_stencil_inplace = true;
if (planes & PIPE_MASK_Z)
sctx->db_flush_depth_inplace = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
surf_tmpl.format = texture->resource.b.b.format;
sctx->decompression_enabled = false;
sctx->db_flush_depth_inplace = false;
sctx->db_flush_stencil_inplace = false;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
/* Helper function of si_flush_depth_texture: decompress the given levels
si_set_clear_color(tex, fb->cbufs[i]->format, color);
sctx->framebuffer.dirty_cbufs |= 1 << i;
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
*buffers &= ~clear_bit;
}
}
zstex->depth_clear_value = depth;
sctx->framebuffer.dirty_zsbuf = true;
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom); /* updates DB_DEPTH_CLEAR */
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer); /* updates DB_DEPTH_CLEAR */
sctx->db_depth_clear = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
/* TC-compatible HTILE only supports stencil clears to 0. */
zstex->stencil_clear_value = stencil;
sctx->framebuffer.dirty_zsbuf = true;
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom); /* updates DB_STENCIL_CLEAR */
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer); /* updates DB_STENCIL_CLEAR */
sctx->db_stencil_clear = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
/* TODO: Find out what's wrong here. Fast depth clear leads to
sctx->db_depth_clear = false;
sctx->db_depth_disable_expclear = false;
zstex->depth_cleared = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
if (sctx->db_stencil_clear) {
sctx->db_stencil_clear = false;
sctx->db_stencil_disable_expclear = false;
zstex->stencil_cleared = true;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
}
si_upload_compute_shader_descriptors(sctx);
si_emit_compute_shader_pointers(sctx);
- if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
- sctx->atoms.s.render_cond->emit(sctx,
- sctx->atoms.s.render_cond);
- si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+ if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
+ sctx->atoms.s.render_cond.emit(sctx,
+ &sctx->atoms.s.render_cond);
+ si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
}
if ((program->input_size ||
if (!sctx->scratch_buffer)
return;
- si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
}
si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
r600_resource_reference(&desc->buffer, NULL);
desc->gpu_list = NULL;
desc->gpu_address = si_desc_extract_buffer_address(descriptor);
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
return true;
}
assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
return true;
}
* on performance (confirmed by testing). New descriptors are always
* uploaded to a fresh new buffer, so I don't think flushing the const
* cache is needed. */
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
sctx->vertex_buffers_dirty = false;
sctx->vertex_buffer_pointer_dirty = true;
sctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
if (shader == PIPE_SHADER_VERTEX)
sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
}
static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
{
sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
- si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
}
sctx->b.make_image_handle_resident = si_make_image_handle_resident;
/* Shader user data. */
- si_init_atom(sctx, &sctx->shader_pointers.atom, &sctx->atoms.s.shader_pointers,
+ si_init_atom(sctx, &sctx->atoms.s.shader_pointers,
si_emit_graphics_shader_pointers);
/* Set default and immutable mappings. */
}
/* This should always be marked as dirty to set the framebuffer scissor
* at least. */
- si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
- si_mark_atom_dirty(ctx, &ctx->clip_regs);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
/* CLEAR_STATE sets zeros. */
if (!has_clear_state || ctx->clip_state.any_nonzeros)
- si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
ctx->msaa_sample_locs.nr_samples = 0;
- si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
- si_mark_atom_dirty(ctx, &ctx->msaa_config);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
/* CLEAR_STATE sets 0xffff. */
if (!has_clear_state || ctx->sample_mask.sample_mask != 0xffff)
- si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
- si_mark_atom_dirty(ctx, &ctx->cb_render_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
/* CLEAR_STATE sets zeros. */
if (!has_clear_state || ctx->blend_color.any_nonzeros)
- si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
- si_mark_atom_dirty(ctx, &ctx->db_render_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
if (ctx->chip_class >= GFX9)
- si_mark_atom_dirty(ctx, &ctx->dpbb_state);
- si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
- si_mark_atom_dirty(ctx, &ctx->spi_map);
- si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
- si_mark_atom_dirty(ctx, &ctx->render_cond_atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
si_all_descriptors_begin_new_cs(ctx);
si_all_resident_buffers_begin_new_cs(ctx);
ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
- si_mark_atom_dirty(ctx, &ctx->scratch_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
if (ctx->scratch_buffer) {
si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
}
};
struct si_blend_color {
- struct si_atom atom;
struct pipe_blend_color state;
bool any_nonzeros;
};
};
struct si_framebuffer {
- struct si_atom atom;
struct pipe_framebuffer_state state;
unsigned colorbuf_enabled_4bit;
unsigned spi_shader_col_format;
};
struct si_scissors {
- struct si_atom atom;
unsigned dirty_mask;
struct pipe_scissor_state states[SI_MAX_VIEWPORTS];
};
struct si_viewports {
- struct si_atom atom;
unsigned dirty_mask;
unsigned depth_range_dirty_mask;
struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
};
struct si_clip_state {
- struct si_atom atom;
struct pipe_clip_state state;
bool any_nonzeros;
};
struct si_sample_locs {
- struct si_atom atom;
unsigned nr_samples;
};
struct si_sample_mask {
- struct si_atom atom;
uint16_t sample_mask;
};
};
struct si_streamout {
- struct si_atom begin_atom;
bool begin_emitted;
unsigned enabled_mask;
unsigned hw_enabled_mask;
/* The state of VGT_STRMOUT_(CONFIG|EN). */
- struct si_atom enable_atom;
bool streamout_enabled;
bool prims_gen_query_enabled;
int num_prims_gen_queries;
/* Atom declarations. */
struct si_framebuffer framebuffer;
struct si_sample_locs msaa_sample_locs;
- struct si_atom db_render_state;
- struct si_atom dpbb_state;
- struct si_atom msaa_config;
struct si_sample_mask sample_mask;
- struct si_atom cb_render_state;
unsigned last_cb_target_mask;
struct si_blend_color blend_color;
- struct si_atom clip_regs;
struct si_clip_state clip_state;
struct si_shader_data shader_pointers;
struct si_stencil_ref stencil_ref;
- struct si_atom spi_map;
struct si_scissors scissors;
struct si_streamout streamout;
struct si_viewports viewports;
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
/* Scratch buffer */
- struct si_atom scratch_state;
struct r600_resource *scratch_buffer;
unsigned scratch_waves;
unsigned spi_tmpring_size;
unsigned num_cs_dw_queries_suspend;
/* Render condition. */
- struct si_atom render_cond_atom;
struct pipe_query *render_cond;
unsigned render_cond_mode;
bool render_cond_invert;
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_query_hw *rquery = (struct si_query_hw *)query;
- struct si_atom *atom = &sctx->render_cond_atom;
+ struct si_atom *atom = &sctx->atoms.s.render_cond;
if (query) {
bool needs_workaround = false;
sctx->b.end_query = si_end_query;
sctx->b.get_query_result = si_get_query_result;
sctx->b.get_query_result_resource = si_get_query_result_resource;
- sctx->render_cond_atom.emit = si_emit_query_predication;
+ sctx->atoms.s.render_cond.emit = si_emit_query_predication;
if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
sctx->b.render_condition = si_render_condition;
/* Initialize an external atom (owned by ../radeon). */
static void
-si_init_external_atom(struct si_context *sctx, struct si_atom *atom,
- struct si_atom **list_elem)
+si_init_external_atom(struct si_context *sctx, struct si_atom *atom)
{
- atom->id = list_elem - sctx->atoms.array;
- *list_elem = atom;
+ atom->id = atom - sctx->atoms.array;
}
/* Initialize an atom owned by radeonsi. */
void si_init_atom(struct si_context *sctx, struct si_atom *atom,
- struct si_atom **list_elem,
void (*emit_func)(struct si_context *ctx, struct si_atom *state))
{
atom->emit = emit_func;
- atom->id = list_elem - sctx->atoms.array;
- *list_elem = atom;
+ atom->id = atom - sctx->atoms.array;
}
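Both init helpers now compute the id by pointer subtraction against the aliased array member. A minimal standalone sketch of that arithmetic (illustrative only; the demo union, the simplified emit signature and main() are not from the patch):

    #include <assert.h>

    struct si_atom {
        void (*emit)(void *ctx, struct si_atom *state); /* simplified signature */
        unsigned short id;
    };

    /* Cut-down stand-in for union si_state_atoms: named atoms aliased by a
     * zero-length array, the same layout trick the patch relies on. */
    union demo_atoms {
        struct {
            struct si_atom render_cond;
            struct si_atom framebuffer;
            struct si_atom db_render_state;
        } s;
        struct si_atom array[0];
    };

    int main(void)
    {
        union demo_atoms atoms = {0};
        struct si_atom *atom = &atoms.s.db_render_state;

        /* Same arithmetic as si_init_atom(): the element index doubles as
         * the atom's dirty-bit position. */
        atom->id = atom - atoms.array;
        assert(atom->id == 2);
        return 0;
    }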
static unsigned si_map_swizzle(unsigned swizzle)
(old_blend->blend_enable_4bit != blend->blend_enable_4bit &&
sctx->framebuffer.nr_samples >= 2 &&
sctx->screen->dcc_msaa_allowed))
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
if (!old_blend ||
old_blend->cb_target_mask != blend->cb_target_mask ||
old_blend->alpha_to_coverage != blend->alpha_to_coverage ||
old_blend->blend_enable_4bit != blend->blend_enable_4bit ||
old_blend->cb_target_enabled_4bit != blend->cb_target_enabled_4bit))
- si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
if (sctx->screen->has_out_of_order_rast &&
(!old_blend ||
old_blend->cb_target_enabled_4bit != blend->cb_target_enabled_4bit ||
old_blend->commutative_4bit != blend->commutative_4bit ||
old_blend->logicop_enable != blend->logicop_enable)))
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
}
static void si_delete_blend_state(struct pipe_context *ctx, void *state)
sctx->blend_color.state = *state;
sctx->blend_color.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
- si_mark_atom_dirty(sctx, &sctx->blend_color.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.blend_color);
}
static void si_emit_blend_color(struct si_context *sctx, struct si_atom *atom)
sctx->clip_state.state = *state;
sctx->clip_state.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
- si_mark_atom_dirty(sctx, &sctx->clip_state.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_state);
cb.buffer = NULL;
cb.user_buffer = state->ucp;
return;
if (!old_rs || old_rs->multisample_enable != rs->multisample_enable) {
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
/* Update the small primitive filter workaround if necessary. */
if (sctx->screen->has_msaa_sample_loc_bug &&
sctx->framebuffer.nr_samples > 1)
- si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
}
sctx->current_vs_state &= C_VS_STATE_CLAMP_VERTEX_COLOR;
old_rs->line_width != rs->line_width ||
old_rs->max_point_size != rs->max_point_size)) {
sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
}
if (!old_rs ||
old_rs->clip_halfz != rs->clip_halfz) {
sctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->viewports.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.viewports);
}
if (!old_rs ||
old_rs->clip_plane_enable != rs->clip_plane_enable ||
old_rs->pa_cl_clip_cntl != rs->pa_cl_clip_cntl)
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
sctx->ia_multi_vgt_param_key.u.line_stipple_enabled =
rs->line_stipple_enable;
return;
sctx->stencil_ref.state = *state;
- si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.stencil_ref);
}
if (memcmp(&dsa->stencil_ref, &sctx->stencil_ref.dsa_part,
sizeof(struct si_dsa_stencil_ref_part)) != 0) {
sctx->stencil_ref.dsa_part = dsa->stencil_ref;
- si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.stencil_ref);
}
if (!old_dsa || old_dsa->alpha_func != dsa->alpha_func)
(old_dsa->depth_enabled != dsa->depth_enabled ||
old_dsa->stencil_enabled != dsa->stencil_enabled ||
old_dsa->db_can_write != dsa->db_can_write)))
- si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
if (sctx->screen->has_out_of_order_rast &&
(!old_dsa ||
memcmp(old_dsa->order_invariance, dsa->order_invariance,
sizeof(old_dsa->order_invariance))))
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
}
static void si_delete_dsa_state(struct pipe_context *ctx, void *state)
/* Occlusion queries. */
if (sctx->occlusion_queries_disabled != !enable) {
sctx->occlusion_queries_disabled = !enable;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
}
void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable)
{
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
bool perfect_enable = sctx->num_perfect_occlusion_queries != 0;
if (perfect_enable != old_perfect_enable)
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
}
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
si_update_ps_colorbuf0_slot(sctx);
si_update_poly_offset_state(sctx);
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
if (sctx->screen->dpbb_allowed)
- si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
if (sctx->framebuffer.any_dst_linear != old_any_dst_linear)
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
if (sctx->screen->has_out_of_order_rast &&
(sctx->framebuffer.colorbuf_enabled_4bit != old_colorbuf_enabled_4bit ||
!!sctx->framebuffer.state.zsbuf != old_has_zsbuf ||
(zstex && zstex->surface.has_stencil != old_has_stencil)))
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
if (sctx->framebuffer.nr_samples != old_nr_samples) {
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
/* Set sample locations as fragment shader constants. */
switch (sctx->framebuffer.nr_samples) {
constbuf.buffer_size = sctx->framebuffer.nr_samples * 2 * 4;
si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS, &constbuf);
- si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
}
sctx->do_update_shaders = true;
void si_update_ps_iter_samples(struct si_context *sctx)
{
if (sctx->framebuffer.nr_samples > 1)
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
if (sctx->screen->dpbb_allowed)
- si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
}
static void si_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
return;
sctx->sample_mask.sample_mask = sample_mask;
- si_mark_atom_dirty(sctx, &sctx->sample_mask.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.sample_mask);
}
static void si_emit_sample_mask(struct si_context *sctx, struct si_atom *atom)
void si_init_state_functions(struct si_context *sctx)
{
- si_init_external_atom(sctx, &sctx->render_cond_atom, &sctx->atoms.s.render_cond);
- si_init_external_atom(sctx, &sctx->streamout.begin_atom, &sctx->atoms.s.streamout_begin);
- si_init_external_atom(sctx, &sctx->streamout.enable_atom, &sctx->atoms.s.streamout_enable);
- si_init_external_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors);
- si_init_external_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports);
-
- si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
- si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
- si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
- si_init_atom(sctx, &sctx->dpbb_state, &sctx->atoms.s.dpbb_state, si_emit_dpbb_state);
- si_init_atom(sctx, &sctx->msaa_config, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
- si_init_atom(sctx, &sctx->sample_mask.atom, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
- si_init_atom(sctx, &sctx->cb_render_state, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
- si_init_atom(sctx, &sctx->blend_color.atom, &sctx->atoms.s.blend_color, si_emit_blend_color);
- si_init_atom(sctx, &sctx->clip_regs, &sctx->atoms.s.clip_regs, si_emit_clip_regs);
- si_init_atom(sctx, &sctx->clip_state.atom, &sctx->atoms.s.clip_state, si_emit_clip_state);
- si_init_atom(sctx, &sctx->stencil_ref.atom, &sctx->atoms.s.stencil_ref, si_emit_stencil_ref);
+ si_init_external_atom(sctx, &sctx->atoms.s.render_cond);
+ si_init_external_atom(sctx, &sctx->atoms.s.streamout_begin);
+ si_init_external_atom(sctx, &sctx->atoms.s.streamout_enable);
+ si_init_external_atom(sctx, &sctx->atoms.s.scissors);
+ si_init_external_atom(sctx, &sctx->atoms.s.viewports);
+
+ si_init_atom(sctx, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
+ si_init_atom(sctx, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
+ si_init_atom(sctx, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
+ si_init_atom(sctx, &sctx->atoms.s.dpbb_state, si_emit_dpbb_state);
+ si_init_atom(sctx, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
+ si_init_atom(sctx, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
+ si_init_atom(sctx, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
+ si_init_atom(sctx, &sctx->atoms.s.blend_color, si_emit_blend_color);
+ si_init_atom(sctx, &sctx->atoms.s.clip_regs, si_emit_clip_regs);
+ si_init_atom(sctx, &sctx->atoms.s.clip_state, si_emit_clip_state);
+ si_init_atom(sctx, &sctx->atoms.s.stencil_ref, si_emit_stencil_ref);
sctx->b.create_blend_state = si_create_blend_state;
sctx->b.bind_blend_state = si_bind_blend_state;
};
struct si_stencil_ref {
- struct si_atom atom;
struct pipe_stencil_ref state;
struct si_dsa_stencil_ref_part dsa_part;
};
union si_state_atoms {
struct {
/* The order matters. */
- struct si_atom *render_cond;
- struct si_atom *streamout_begin;
- struct si_atom *streamout_enable; /* must be after streamout_begin */
- struct si_atom *framebuffer;
- struct si_atom *msaa_sample_locs;
- struct si_atom *db_render_state;
- struct si_atom *dpbb_state;
- struct si_atom *msaa_config;
- struct si_atom *sample_mask;
- struct si_atom *cb_render_state;
- struct si_atom *blend_color;
- struct si_atom *clip_regs;
- struct si_atom *clip_state;
- struct si_atom *shader_pointers;
- struct si_atom *scissors;
- struct si_atom *viewports;
- struct si_atom *stencil_ref;
- struct si_atom *spi_map;
- struct si_atom *scratch_state;
+ struct si_atom render_cond;
+ struct si_atom streamout_begin;
+ struct si_atom streamout_enable; /* must be after streamout_begin */
+ struct si_atom framebuffer;
+ struct si_atom msaa_sample_locs;
+ struct si_atom db_render_state;
+ struct si_atom dpbb_state;
+ struct si_atom msaa_config;
+ struct si_atom sample_mask;
+ struct si_atom cb_render_state;
+ struct si_atom blend_color;
+ struct si_atom clip_regs;
+ struct si_atom clip_state;
+ struct si_atom shader_pointers;
+ struct si_atom scissors;
+ struct si_atom viewports;
+ struct si_atom stencil_ref;
+ struct si_atom spi_map;
+ struct si_atom scratch_state;
} s;
- struct si_atom *array[0];
+ struct si_atom array[0];
};
-#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))
+#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom))
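Each atom embedded in this union owns one bit in the context's dirty mask, with atom->id as the bit position, so SI_NUM_ATOMS must count struct si_atom elements rather than pointers after this change. The dirty-tracking helpers themselves are defined elsewhere in the tree and are untouched by this patch; a sketch of what they presumably look like (assuming dirty_atoms is an unsigned bitmask on si_context):

    /* Sketch only: the real helpers live in the radeonsi headers and are
     * assumed, not shown, by this diff. */
    static inline void si_set_atom_dirty(struct si_context *sctx,
                                         struct si_atom *atom, bool dirty)
    {
        unsigned bit = 1u << atom->id;

        if (dirty)
            sctx->dirty_atoms |= bit;
        else
            sctx->dirty_atoms &= ~bit;
    }

    static inline bool si_is_atom_dirty(struct si_context *sctx,
                                        struct si_atom *atom)
    {
        return sctx->dirty_atoms & (1u << atom->id);
    }

    static inline void si_mark_atom_dirty(struct si_context *sctx,
                                          struct si_atom *atom)
    {
        si_set_atom_dirty(sctx, atom, true);
    }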
struct si_shader_data {
- struct si_atom atom;
uint32_t sh_base[SI_NUM_SHADERS];
};
struct si_shader_selector;
void si_init_atom(struct si_context *sctx, struct si_atom *atom,
- struct si_atom **list_elem,
void (*emit_func)(struct si_context *ctx, struct si_atom *state));
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
while (mask) {
- struct si_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
+ struct si_atom *atom = &sctx->atoms.array[u_bit_scan(&mask)];
atom->emit(sctx, atom);
}
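A self-contained illustration of the loop above (hypothetical harness, not part of the patch; demo_bit_scan stands in for Mesa's u_bit_scan, the emit callback just prints, and __builtin_ctz assumes GCC/Clang):

    #include <stdio.h>

    struct si_atom {
        void (*emit)(void *sctx, struct si_atom *atom);
        unsigned short id;
    };

    /* Stand-in for u_bit_scan(): return the lowest set bit's index and clear it. */
    static int demo_bit_scan(unsigned *mask)
    {
        int i = __builtin_ctz(*mask);
        *mask &= *mask - 1;
        return i;
    }

    static void emit_stub(void *sctx, struct si_atom *atom)
    {
        (void)sctx;
        printf("emitting atom %d\n", atom->id);
    }

    int main(void)
    {
        struct si_atom array[3] = {
            { emit_stub, 0 }, { emit_stub, 1 }, { emit_stub, 2 },
        };
        unsigned dirty_atoms = (1u << 0) | (1u << 2); /* atoms 0 and 2 dirty */
        unsigned skip_atom_mask = 1u << 0;            /* mask out atom 0 */
        unsigned mask = dirty_atoms & ~skip_atom_mask;

        while (mask) {
            struct si_atom *atom = &array[demo_bit_scan(&mask)];
            atom->emit(NULL, atom); /* prints "emitting atom 2" */
        }
        return 0;
    }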
sctx->framebuffer.dirty_cbufs |=
((1 << sctx->framebuffer.state.nr_cbufs) - 1);
sctx->framebuffer.dirty_zsbuf = true;
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
si_update_all_texture_descriptors(sctx);
}
bool new_is_poly = rast_prim >= PIPE_PRIM_TRIANGLES;
if (old_is_poly != new_is_poly) {
sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
}
sctx->current_rast_prim = rast_prim;
* more involved alternative workaround.
*/
if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
- si_is_atom_dirty(sctx, &sctx->scissors.atom)) {
+ si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)) {
sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
si_emit_cache_flush(sctx);
}
unsigned masked_atoms = 0;
if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
- masked_atoms |= 1u << sctx->render_cond_atom.id;
+ masked_atoms |= 1u << sctx->atoms.s.render_cond.id;
if (!si_upload_graphics_shader_descriptors(sctx))
return;
si_emit_cache_flush(sctx);
/* <-- CUs are idle here. */
- if (si_is_atom_dirty(sctx, &sctx->render_cond_atom))
- sctx->render_cond_atom.emit(sctx, NULL);
+ if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
+ sctx->atoms.s.render_cond.emit(sctx, NULL);
sctx->dirty_atoms = 0;
si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
!next_hw_vs_variant ||
old_hw_vs_variant->key.opt.clip_disable !=
next_hw_vs_variant->key.opt.clip_disable))
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
}
static void si_update_common_shader_state(struct si_context *sctx)
if (!old_sel ||
old_sel->info.colors_written != sel->info.colors_written)
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
if (sctx->screen->has_out_of_order_rast &&
(!old_sel ||
old_sel->info.writes_memory != sel->info.writes_memory ||
old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
}
si_set_active_descriptors_for_shader(sctx, sel);
si_update_ps_colorbuf0_slot(sctx);
if (!sctx->scratch_buffer)
return false;
- si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
si_context_add_resource_size(sctx,
&sctx->scratch_buffer->b.b);
}
S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
if (spi_tmpring_size != sctx->spi_tmpring_size) {
sctx->spi_tmpring_size = spi_tmpring_size;
- si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
}
return true;
}
si_update_vgt_shader_config(sctx);
if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
if (sctx->ps_shader.cso) {
unsigned db_shader_control;
sctx->flatshade != rs->flatshade) {
sctx->sprite_coord_enable = rs->sprite_coord_enable;
sctx->flatshade = rs->flatshade;
- si_mark_atom_dirty(sctx, &sctx->spi_map);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
}
if (sctx->screen->rbplus_allowed &&
(!old_ps ||
old_spi_shader_col_format !=
sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
if (sctx->ps_db_shader_control != db_shader_control) {
sctx->ps_db_shader_control = db_shader_control;
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
if (sctx->screen->dpbb_allowed)
- si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
}
if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
- si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
if (sctx->chip_class == SI)
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
if (sctx->framebuffer.nr_samples <= 1)
- si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
}
}
void si_init_shader_functions(struct si_context *sctx)
{
- si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
- si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
+ si_init_atom(sctx, &sctx->atoms.s.spi_map, si_emit_spi_map);
+ si_init_atom(sctx, &sctx->atoms.s.scratch_state,
si_emit_scratch_state);
sctx->b.create_vs_state = si_create_shader_selector;
if (!sctx->streamout.enabled_mask)
return;
- si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_begin);
si_set_streamout_enable(sctx, true);
}
if (num_targets) {
si_streamout_buffers_dirty(sctx);
} else {
- si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
+ si_set_atom_dirty(sctx, &sctx->atoms.s.streamout_begin, false);
si_set_streamout_enable(sctx, false);
}
if ((old_strmout_en != si_get_strmout_en(sctx)) ||
(old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
- si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
}
void si_update_prims_generated_query_state(struct si_context *sctx,
sctx->streamout.num_prims_gen_queries != 0;
if (old_strmout_en != si_get_strmout_en(sctx))
- si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
}
}
sctx->b.create_stream_output_target = si_create_so_target;
sctx->b.stream_output_target_destroy = si_so_target_destroy;
sctx->b.set_stream_output_targets = si_set_streamout_targets;
- sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
- sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
+ sctx->atoms.s.streamout_begin.emit = si_emit_streamout_begin;
+ sctx->atoms.s.streamout_enable.emit = si_emit_streamout_enable;
}
return;
ctx->scissors.dirty_mask |= ((1 << num_scissors) - 1) << start_slot;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}
/* Since the guard band disables clipping, we have to clip per-pixel
ctx->viewports.dirty_mask |= mask;
ctx->viewports.depth_range_dirty_mask |= mask;
ctx->scissors.dirty_mask |= mask;
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}
static void si_emit_one_viewport(struct si_context *ctx,
ctx->vs_disables_clipping_viewport = vs_window_space;
ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
}
/* Viewport index handling. */
return;
if (ctx->scissors.dirty_mask)
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
if (ctx->viewports.dirty_mask ||
ctx->viewports.depth_range_dirty_mask)
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
}
void si_init_viewport_functions(struct si_context *ctx)
{
- ctx->scissors.atom.emit = si_emit_scissors;
- ctx->viewports.atom.emit = si_emit_viewport_states;
+ ctx->atoms.s.scissors.emit = si_emit_scissors;
+ ctx->atoms.s.viewports.emit = si_emit_viewport_states;
ctx->b.set_scissor_states = si_set_scissor_states;
ctx->b.set_viewport_states = si_set_viewport_states;