if (tex->num_samplers > 0) {
struct fd_ringbuffer *state =
- fd_ringbuffer_new_object(ctx->pipe, tex->num_samplers * 4);
+ fd_ringbuffer_new_flags(ctx->pipe, tex->num_samplers * 4 * 4,
+ FD_RINGBUFFER_OBJECT | FD_RINGBUFFER_STREAMING);
for (unsigned i = 0; i < tex->num_samplers; i++) {
static const struct fd6_sampler_stateobj dummy_sampler = {};
const struct fd6_sampler_stateobj *sampler = tex->samplers[i] ?
if (tex->num_textures > 0) {
struct fd_ringbuffer *state =
- fd_ringbuffer_new_object(ctx->pipe, tex->num_textures * 16);
+ fd_ringbuffer_new_flags(ctx->pipe, tex->num_textures * 16 * 4,
+ FD_RINGBUFFER_OBJECT | FD_RINGBUFFER_STREAMING);
for (unsigned i = 0; i < tex->num_textures; i++) {
static const struct fd6_pipe_sampler_view dummy_view = {};
const struct fd6_pipe_sampler_view *view = tex->textures[i] ?
OUT_RING(ring, A6XX_SP_FS_OUTPUT_CNTL1_MRT(nr));
}
- ir3_emit_vs_consts(vp, ring, ctx, emit->info);
- if (!emit->key.binning_pass)
- ir3_emit_fs_consts(fp, ring, ctx);
+#define DIRTY_CONST (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST | \
+ FD_DIRTY_SHADER_SSBO | FD_DIRTY_SHADER_IMAGE)
+
+ if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & DIRTY_CONST) {
+ struct fd_ringbuffer *vsconstobj =
+ fd_ringbuffer_new_flags(ctx->pipe, 0x1000,
+ FD_RINGBUFFER_OBJECT | FD_RINGBUFFER_STREAMING);
+
+ ir3_emit_vs_consts(vp, vsconstobj, ctx, emit->info);
+ fd6_emit_add_group(emit, vsconstobj, FD6_GROUP_VS_CONST, 0x7);
+ fd_ringbuffer_del(vsconstobj);
+ }
+
+ if ((ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & DIRTY_CONST) &&
+ !emit->key.binning_pass) {
+ struct fd_ringbuffer *fsconstobj =
+ fd_ringbuffer_new_flags(ctx->pipe, 0x1000,
+ FD_RINGBUFFER_OBJECT | FD_RINGBUFFER_STREAMING);
+
+ ir3_emit_fs_consts(fp, fsconstobj, ctx);
+ fd6_emit_add_group(emit, fsconstobj, FD6_GROUP_FS_CONST, 0x7);
+ fd_ringbuffer_del(fsconstobj);
+ }
struct pipe_stream_output_info *info = &vp->shader->stream_output;
if (info->num_outputs) {
#include "freedreno_resource.h"
+/* ring_wfi() - conditionally emit a wait-for-idle before const uploads.
+ *
+ * @batch: batch the WFI is accounted against
+ * @ring:  destination ringbuffer; may be either the direct command
+ *         stream (IB2) or a CP_SET_DRAW_STATE state object
+ *
+ * Const state written directly into the command stream needs a WFI so
+ * previously submitted work is not still reading the const registers.
+ * State objects are (presumably) only evaluated by the CP at draw time,
+ * after such a wait has already happened, so the WFI can be skipped
+ * there — NOTE(review): confirm this ordering guarantee on a6xx.
+ */
+static inline void
+ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
+{
+ /* when we emit const state via ring (IB2) we need a WFI, but when
+ * it is emitted via stateobj, we don't
+ */
+ if (ring->flags & FD_RINGBUFFER_OBJECT)
+ return;
+
+ fd_wfi(batch, ring);
+}
+
static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
size = MIN2(size, 4 * max_const);
if (size > 0) {
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, 0,
cb->buffer_offset, size,
cb->user_buffer, cb->buffer);
}
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
}
}
sizes[off] = sb->sb[index].buffer_size;
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, offset * 4,
0, ARRAY_SIZE(sizes), sizes, NULL);
}
}
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, offset * 4,
0, ARRAY_SIZE(dims), dims, NULL);
}
size *= 4;
if (size > 0) {
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, base,
0, size, v->immediates[0].val, NULL);
}
}
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
}
}
{
enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];
+ /* When we use CP_SET_DRAW_STATE objects to emit constant state,
+ * if we emit any of it we need to emit all. This is because
+ * we are using the same state-group-id each time for uniform
+ * state, and if previous update is never evaluated (due to no
+ * visible primitives in the current tile) then the new stateobj
+ * completely replaces the old one.
+ *
+ * Possibly if we split up different parts of the const state to
+ * different state-objects we could avoid this.
+ */
+ if (dirty && (ring->flags & FD_RINGBUFFER_OBJECT))
+ dirty = ~0;
+
if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
struct fd_constbuf_stateobj *constbuf;
bool shader_dirty;
vertex_params_size = ARRAY_SIZE(vertex_params);
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
bool needs_vtxid_base =
ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);
/* emit compute-shader driver-params: */
uint32_t offset = v->constbase.driver_param;
if (v->constlen > offset) {
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
if (info->indirect) {
struct pipe_resource *indirect = NULL;