/* This encapsulates a state or an operation which can be emitted into the GPU
* command stream. */
struct r600_atom {
- void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
+ void (*emit)(struct si_context *ctx, struct r600_atom *state);
unsigned short id;
};
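
For reference, a minimal sketch of an emit callback written against the new signature; the function name, the atom it would belong to, and the register write are hypothetical and only illustrate the pattern the rest of this patch converts the driver to:

static void si_emit_example_state(struct si_context *sctx,
				  struct r600_atom *atom)
{
	/* Hypothetical state emission; real atoms build their own packets. */
	radeon_set_context_reg(sctx->b.gfx_cs, R_028810_PA_CL_CLIP_CNTL, 0);
}
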
si_update_prims_generated_query_state((void*)ctx, query->b.type, -1);
}
-static void emit_set_predicate(struct r600_common_context *ctx,
+static void emit_set_predicate(struct si_context *ctx,
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
- if (ctx->chip_class >= GFX9) {
+ if (ctx->b.chip_class >= GFX9) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
radeon_emit(cs, op);
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
} else {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
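
For context, the caller of this helper assembles op by combining a base predication operation with the wait and visibility hints, along the lines of the logic in r600_emit_query_predication below (shown here for the common occlusion-query case):

op = PRED_OP(PREDICATION_OP_ZPASS);
op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT;
op |= invert ? PREDICATION_DRAW_NOT_VISIBLE : PREDICATION_DRAW_VISIBLE;
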
-static void r600_emit_query_predication(struct r600_common_context *ctx,
+static void r600_emit_query_predication(struct si_context *ctx,
struct r600_atom *atom)
{
- struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
+ struct r600_query_hw *query = (struct r600_query_hw *)ctx->b.render_cond;
struct r600_query_buffer *qbuf;
uint32_t op;
bool flag_wait, invert;
if (!query)
return;
- invert = ctx->render_cond_invert;
- flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
- ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
+ invert = ctx->b.render_cond_invert;
+ flag_wait = ctx->b.render_cond_mode == PIPE_RENDER_COND_WAIT ||
+ ctx->b.render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
if (query->workaround_buf) {
op = PRED_OP(PREDICATION_OP_BOOL64);
si_emit_compute_shader_pointers(sctx);
if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
- sctx->atoms.s.render_cond->emit(&sctx->b,
+ sctx->atoms.s.render_cond->emit(sctx,
sctx->atoms.s.render_cond);
si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
}
struct r600_atom **list_elem,
void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
{
- atom->emit = (void*)emit_func;
+ atom->emit = emit_func;
atom->id = list_elem - sctx->atoms.array;
*list_elem = atom;
}
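
With the cast gone, registering an atom type-checks directly against the si_context signature; a sketch, assuming the example_state members and callback from the sketch above, which do not exist in the real driver:

/* Hypothetical atom registration using the callback sketched earlier. */
si_init_atom(sctx, &sctx->example_state, &sctx->atoms.s.example_state,
	     si_emit_example_state);
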
while (mask) {
struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
- atom->emit(&sctx->b, atom);
+ atom->emit(sctx, atom);
}
sctx->dirty_atoms &= skip_atom_mask;
/* Set shader pointers after descriptors are uploaded. */
if (si_is_atom_dirty(sctx, shader_pointers))
- shader_pointers->emit(&sctx->b, NULL);
+ shader_pointers->emit(sctx, NULL);
if (si_is_atom_dirty(sctx, &sctx->b.render_cond_atom))
- sctx->b.render_cond_atom.emit(&sctx->b, NULL);
+ sctx->b.render_cond_atom.emit(sctx, NULL);
sctx->dirty_atoms = 0;
si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
radeon_emit(cs, 4); /* poll interval */
}
-static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
+static void si_emit_streamout_begin(struct si_context *sctx, struct r600_atom *atom)
{
- struct si_context *sctx = (struct si_context*)rctx;
struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
* are no buffers bound.
*/
-static void si_emit_streamout_enable(struct r600_common_context *rctx,
+static void si_emit_streamout_enable(struct si_context *sctx,
struct r600_atom *atom)
{
- struct si_context *sctx = (struct si_context*)rctx;
-
radeon_set_context_reg_seq(sctx->b.gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
radeon_emit(sctx->b.gfx_cs,
S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
radeon_emit(cs, fui(discard_x)); /* R_028BF4_PA_CL_GB_HORZ_DISC_ADJ */
}
-static void si_emit_scissors(struct r600_common_context *rctx, struct r600_atom *atom)
+static void si_emit_scissors(struct si_context *ctx, struct r600_atom *atom)
{
- struct si_context *ctx = (struct si_context *)rctx;
struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_scissor_state *states = ctx->scissors.states;
unsigned mask = ctx->scissors.dirty_mask;
ctx->viewports.depth_range_dirty_mask = 0;
}
-static void si_emit_viewport_states(struct r600_common_context *rctx,
+static void si_emit_viewport_states(struct si_context *ctx,
struct r600_atom *atom)
{
- struct si_context *ctx = (struct si_context *)rctx;
si_emit_viewports(ctx);
si_emit_depth_ranges(ctx);
}