perfect_enable = sctx->b.num_perfect_occlusion_queries != 0;
if (enable != old_enable || perfect_enable != old_perfect_enable) {
- si_set_occlusion_query_state(&sctx->b.b, old_perfect_enable);
+ si_set_occlusion_query_state(sctx, old_perfect_enable);
}
}
}
return;
}
- si_save_qbo_state(&sctx->b.b, &saved_state);
+ si_save_qbo_state(sctx, &saved_state);
r600_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
consts.end_offset = params.end_offset - params.start_offset;
si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
/* Add buffer sizes for memory checking in need_cs_space. */
- si_context_add_resource_size(ctx, &program->shader.bo->b.b);
+ si_context_add_resource_size(sctx, &program->shader.bo->b.b);
/* TODO: add the scratch buffer */
if (info->indirect) {
- si_context_add_resource_size(ctx, info->indirect);
+ si_context_add_resource_size(sctx, info->indirect);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
if (sctx->b.chip_class <= VI &&
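For reference, the body this condition guards is elided above; a minimal sketch of the pre-GFX9 writeback, assuming the SI_CONTEXT_WRITEBACK_GLOBAL_L2 flag and the TC_L2_dirty bookkeeping used elsewhere in the driver at this time (not verbatim from this diff):

	/* Sketch: the CP fetches indirect arguments bypassing TC L2 on
	 * pre-GFX9 chips, so a dirty L2 must be written back first. */
	if (sctx->b.chip_class <= VI &&
	    r600_resource(info->indirect)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(info->indirect)->TC_L2_dirty = false;
	}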
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
/* Count memory usage so that need_cs_space can take it into account. */
- si_context_add_resource_size(&sctx->b.b, dst);
+ si_context_add_resource_size(sctx, dst);
if (src)
- si_context_add_resource_size(&sctx->b.b, src);
+ si_context_add_resource_size(sctx, src);
}
if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
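These calls only grow running totals; the actual check (skipped above via SI_CPDMA_SKIP_CHECK_CS_SPACE) happens before packets are emitted. A minimal sketch of that consumer side, assuming the radeon_cs_memory_below_limit helper and the per-context vram/gtt counters of this era; not verbatim driver code:

	/* Sketch: if the accumulated footprint no longer fits in the
	 * current command stream, reset the counters and flush. */
	if (!radeon_cs_memory_below_limit(sctx->b.screen, sctx->b.gfx.cs,
					  sctx->b.vram, sctx->b.gtt)) {
		sctx->b.gtt = 0;
		sctx->b.vram = 0;
		sctx->b.gfx.flush(sctx, RADEON_FLUSH_ASYNC, NULL);
	}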
/* RING BUFFERS */
-void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
+void si_set_ring_buffer(struct si_context *sctx, uint slot,
struct pipe_resource *buffer,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
unsigned element_size, unsigned index_stride, uint64_t offset)
{
- struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->rw_buffers;
struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
si_mark_atom_dirty(ctx, &ctx->scratch_state);
if (ctx->scratch_buffer) {
- si_context_add_resource_size(&ctx->b.b,
- &ctx->scratch_buffer->b.b);
+ si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
}
if (ctx->streamout.suspended) {
*/
static inline void
-si_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
+si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
{
- struct si_context *sctx = (struct si_context *)ctx;
struct r600_resource *res = (struct r600_resource *)r;
if (res) {
}
}
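The hunk elides the helper's interior; a sketch of the body inside the if (res) branch, assuming r600_resource tracks its footprint in vram_usage/gart_usage as elsewhere in this era:

	/* Sketch, not verbatim: fold this buffer's footprint into the
	 * per-CS totals that the need_cs_space check consumes later. */
	sctx->b.vram += res->vram_usage;
	sctx->b.gtt += res->gart_usage;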
-void si_set_occlusion_query_state(struct pipe_context *ctx,
+void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable)
{
- struct si_context *sctx = (struct si_context*)ctx;
-
si_mark_atom_dirty(sctx, &sctx->db_render_state);
bool perfect_enable = sctx->b.num_perfect_occlusion_queries != 0;
if (perfect_enable != old_perfect_enable)
	si_mark_atom_dirty(sctx, &sctx->msaa_config);
}
-void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
+void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st)
{
- struct si_context *sctx = (struct si_context*)ctx;
-
st->saved_compute = sctx->cs_shader_state.program;
si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
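For context, a hedged usage sketch of why this save exists: the query-buffer-object path binds an internal compute shader to write query results into a buffer, then puts the application's state back. The restore counterpart is an assumption here, not shown in this diff:

	struct r600_qbo_state saved;
	si_save_qbo_state(sctx, &saved);
	/* ... bind the internal resolve shader, launch the grid ... */
	/* assumed counterpart rebinding saved_compute/saved_const0 */
	r600_restore_qbo_state(&sctx->b, &saved);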
if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
sctx->framebuffer.CB_has_shader_readable_metadata = true;
- si_context_add_resource_size(ctx, surf->base.texture);
+ si_context_add_resource_size(sctx, surf->base.texture);
p_atomic_inc(&rtex->framebuffers_bound);
if (vi_tc_compat_htile_enabled(zstex, surf->base.u.tex.level))
sctx->framebuffer.DB_has_shader_readable_metadata = true;
- si_context_add_resource_size(ctx, surf->base.texture);
+ si_context_add_resource_size(sctx, surf->base.texture);
}
si_update_ps_colorbuf0_slot(sctx);
pipe_resource_reference(&dsti->buffer.resource, buf);
dsti->buffer_offset = src->buffer_offset;
dsti->stride = src->stride;
- si_context_add_resource_size(ctx, buf);
+ si_context_add_resource_size(sctx, buf);
if (buf)
r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
}
enum pipe_shader_type shader,
uint start_slot, uint count,
struct pipe_shader_buffer *sbuf);
-void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
+void si_set_ring_buffer(struct si_context *sctx, uint slot,
struct pipe_resource *buffer,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
-void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st);
-void si_set_occlusion_query_state(struct pipe_context *ctx,
+void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st);
+void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable);
/* si_state_binning.c */
struct pipe_draw_indirect_info *indirect = info->indirect;
/* Add the buffer size for memory checking in need_cs_space. */
- si_context_add_resource_size(ctx, indirect->buffer);
+ si_context_add_resource_size(sctx, indirect->buffer);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
if (sctx->b.chip_class <= VI) {
/* Set ring bindings. */
if (sctx->esgs_ring) {
assert(sctx->b.chip_class <= VI);
- si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
+ si_set_ring_buffer(sctx, SI_ES_RING_ESGS,
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
true, true, 4, 64, 0);
- si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
+ si_set_ring_buffer(sctx, SI_GS_RING_ESGS,
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
false, false, 0, 0, 0);
}
if (sctx->gsvs_ring) {
- si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
+ si_set_ring_buffer(sctx, SI_RING_GSVS,
sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
false, false, 0, 0, 0);
}
return false;
si_mark_atom_dirty(sctx, &sctx->scratch_state);
- si_context_add_resource_size(&sctx->b.b,
+ si_context_add_resource_size(sctx,
&sctx->scratch_buffer->b.b);
}
void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
unsigned num_layers)
{
- struct pipe_context *pipe = &sctx->b.b;
unsigned vs_blit_property;
void **vs;
}
ureg_END(ureg);
- *vs = ureg_create_shader_and_destroy(ureg, pipe);
+ *vs = ureg_create_shader_and_destroy(ureg, &sctx->b.b);
return *vs;
}
if (!targets[i])
continue;
- si_context_add_resource_size(ctx, targets[i]->buffer);
+ si_context_add_resource_size(sctx, targets[i]->buffer);
enabled_mask |= 1 << i;
if (offsets[i] == ((unsigned)-1))