#include "fd6_const.h"
#include "fd6_pack.h"
+#define emit_const_user fd6_emit_const_user
+#define emit_const_bo fd6_emit_const_bo
#include "ir3_const.h"
/* regid: base const register
 * bo or dwords: buffer containing constant values
 * sizedwords: size of const value buffer
 */
-static void
-fd6_emit_const(struct fd_ringbuffer *ring, gl_shader_stage type,
- uint32_t regid, uint32_t offset, uint32_t sizedwords,
- const uint32_t *dwords, struct pipe_resource *prsc)
+void
+fd6_emit_const_user(struct fd_ringbuffer *ring,
+ const struct ir3_shader_variant *v, uint32_t regid,
+ uint32_t sizedwords, const uint32_t *dwords)
{
- if (prsc) {
- struct fd_bo *bo = fd_resource(prsc)->bo;
-
- if (fd6_geom_stage(type)) {
- OUT_PKT(ring, CP_LOAD_STATE6_GEOM,
- CP_LOAD_STATE6_0(
- .dst_off = regid/4,
- .state_type = ST6_CONSTANTS,
- .state_src = SS6_INDIRECT,
- .state_block = fd6_stage2shadersb(type),
- .num_unit = DIV_ROUND_UP(sizedwords, 4)
- ),
- CP_LOAD_STATE6_EXT_SRC_ADDR(
- .bo = bo,
- .bo_offset = offset
- )
- );
- } else {
- OUT_PKT(ring, CP_LOAD_STATE6_FRAG,
- CP_LOAD_STATE6_0(
- .dst_off = regid/4,
- .state_type = ST6_CONSTANTS,
- .state_src = SS6_INDIRECT,
- .state_block = fd6_stage2shadersb(type),
- .num_unit = DIV_ROUND_UP(sizedwords, 4)
- ),
- CP_LOAD_STATE6_EXT_SRC_ADDR(
- .bo = bo,
- .bo_offset = offset
- )
- );
- }
+ emit_const_asserts(ring, v, regid, sizedwords);
+
+ /* NOTE we cheat a bit here, since we know mesa is aligning
+ * the size of the user buffer to 16 bytes. And we want to
+ * cut cycles in a hot path.
+ */
+ uint32_t align_sz = align(sizedwords, 4);
+
+ if (fd6_geom_stage(v->type)) {
+ OUT_PKTBUF(ring, CP_LOAD_STATE6_GEOM, dwords, align_sz,
+ CP_LOAD_STATE6_0(
+ .dst_off = regid/4,
+ .state_type = ST6_CONSTANTS,
+ .state_src = SS6_DIRECT,
+ .state_block = fd6_stage2shadersb(v->type),
+ .num_unit = DIV_ROUND_UP(sizedwords, 4)
+ ),
+ CP_LOAD_STATE6_1(),
+ CP_LOAD_STATE6_2()
+ );
} else {
- /* NOTE we cheat a bit here, since we know mesa is aligning
- * the size of the user buffer to 16 bytes. And we want to
- * cut cycles in a hot path.
- */
- uint32_t align_sz = align(sizedwords, 4);
- dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
-
- if (fd6_geom_stage(type)) {
- OUT_PKTBUF(ring, CP_LOAD_STATE6_GEOM, dwords, align_sz,
- CP_LOAD_STATE6_0(
- .dst_off = regid/4,
- .state_type = ST6_CONSTANTS,
- .state_src = SS6_DIRECT,
- .state_block = fd6_stage2shadersb(type),
- .num_unit = DIV_ROUND_UP(sizedwords, 4)
- ),
- CP_LOAD_STATE6_1(),
- CP_LOAD_STATE6_2()
- );
- } else {
- OUT_PKTBUF(ring, CP_LOAD_STATE6_FRAG, dwords, align_sz,
- CP_LOAD_STATE6_0(
- .dst_off = regid/4,
- .state_type = ST6_CONSTANTS,
- .state_src = SS6_DIRECT,
- .state_block = fd6_stage2shadersb(type),
- .num_unit = DIV_ROUND_UP(sizedwords, 4)
- ),
- CP_LOAD_STATE6_1(),
- CP_LOAD_STATE6_2()
- );
- }
+ OUT_PKTBUF(ring, CP_LOAD_STATE6_FRAG, dwords, align_sz,
+ CP_LOAD_STATE6_0(
+ .dst_off = regid/4,
+ .state_type = ST6_CONSTANTS,
+ .state_src = SS6_DIRECT,
+ .state_block = fd6_stage2shadersb(v->type),
+ .num_unit = DIV_ROUND_UP(sizedwords, 4)
+ ),
+ CP_LOAD_STATE6_1(),
+ CP_LOAD_STATE6_2()
+ );
+ }
+}
+void
+fd6_emit_const_bo(struct fd_ringbuffer *ring,
+ const struct ir3_shader_variant *v, uint32_t regid,
+ uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
+{
+ emit_const_asserts(ring, v, regid, sizedwords);
+
+ if (fd6_geom_stage(v->type)) {
+ OUT_PKT(ring, CP_LOAD_STATE6_GEOM,
+ CP_LOAD_STATE6_0(
+ .dst_off = regid/4,
+ .state_type = ST6_CONSTANTS,
+ .state_src = SS6_INDIRECT,
+ .state_block = fd6_stage2shadersb(v->type),
+ .num_unit = DIV_ROUND_UP(sizedwords, 4)
+ ),
+ CP_LOAD_STATE6_EXT_SRC_ADDR(
+ .bo = bo,
+ .bo_offset = offset
+ )
+ );
+ } else {
+ OUT_PKT(ring, CP_LOAD_STATE6_FRAG,
+ CP_LOAD_STATE6_0(
+ .dst_off = regid/4,
+ .state_type = ST6_CONSTANTS,
+ .state_src = SS6_INDIRECT,
+ .state_block = fd6_stage2shadersb(v->type),
+ .num_unit = DIV_ROUND_UP(sizedwords, 4)
+ ),
+ CP_LOAD_STATE6_EXT_SRC_ADDR(
+ .bo = bo,
+ .bo_offset = offset
+ )
+ );
}
}
return true;
}
-void
-emit_const(struct fd_ringbuffer *ring,
- const struct ir3_shader_variant *v, uint32_t dst_offset,
- uint32_t offset, uint32_t size, const void *user_buffer,
- struct pipe_resource *buffer)
-{
- /* TODO inline this */
- assert(dst_offset + size <= v->constlen * 4);
- fd6_emit_const(ring, v->type, dst_offset,
- offset, size, user_buffer, buffer);
-}
-
static void
-emit_const_bo(struct fd_ringbuffer *ring,
+emit_const_ptrs(struct fd_ringbuffer *ring,
const struct ir3_shader_variant *v, uint32_t dst_offset,
uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
{
const unsigned regid = const_state->offsets.primitive_param;
int size = MIN2(1 + regid, v->constlen) - regid;
if (size > 0)
- fd6_emit_const(ring, v->type, regid * 4, 0, num_params, params, NULL);
+ fd6_emit_const_user(ring, v, regid * 4, num_params, params);
}
static void
emit->gs->shader->nir->info.gs.vertices_in;
uint32_t vs_params[4] = {
- emit->vs->shader->output_size * num_vertices * 4, /* vs primitive stride */
- emit->vs->shader->output_size * 4, /* vs vertex stride */
+ emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
+ emit->vs->output_size * 4, /* vs vertex stride */
0,
0
};
if (emit->hs) {
uint32_t hs_params[4] = {
- emit->vs->shader->output_size * num_vertices * 4, /* vs primitive stride */
- emit->vs->shader->output_size * 4, /* vs vertex stride */
- emit->hs->shader->output_size,
+ emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
+ emit->vs->output_size * 4, /* vs vertex stride */
+ emit->hs->output_size,
emit->info->vertices_per_patch
};
num_vertices = emit->gs->shader->nir->info.gs.vertices_in;
uint32_t ds_params[4] = {
- emit->ds->shader->output_size * num_vertices * 4, /* ds primitive stride */
- emit->ds->shader->output_size * 4, /* ds vertex stride */
- emit->hs->shader->output_size, /* hs vertex stride (dwords) */
+ emit->ds->output_size * num_vertices * 4, /* ds primitive stride */
+ emit->ds->output_size * 4, /* ds vertex stride */
+ emit->hs->output_size, /* hs vertex stride (dwords) */
emit->hs->shader->nir->info.tess.tcs_vertices_out
};
prev = emit->vs;
uint32_t gs_params[4] = {
- prev->shader->output_size * num_vertices * 4, /* ds primitive stride */
- prev->shader->output_size * 4, /* ds vertex stride */
+ prev->output_size * num_vertices * 4, /* ds primitive stride */
+ prev->output_size * 4, /* ds vertex stride */
0,
0,
};
fd6_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
- if (!v->shader->num_ubos)
- return;
+ const struct ir3_const_state *const_state = ir3_const_state(v);
+ int num_ubos = const_state->num_ubos;
- int num_ubos = v->shader->num_ubos;
+ if (!num_ubos)
+ return;
OUT_PKT7(ring, fd6_stage2opcode(v->type), 3 + (2 * num_ubos));
OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
0);
} else {
OUT_RING(ring, 0xbad00000 | (i << 16));
- OUT_RING(ring, 0xbad00000 | (i << 16));
+ OUT_RING(ring, A6XX_UBO_1_SIZE(0));
}
}
}
static unsigned
user_consts_cmdstream_size(struct ir3_shader_variant *v)
{
- struct ir3_ubo_analysis_state *ubo_state = &v->shader->ubo_state;
+ struct ir3_const_state *const_state = ir3_const_state(v);
+ struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;
if (unlikely(!ubo_state->cmdstream_size)) {
unsigned packets, size;
/* also account for UBO addresses: */
packets += 1;
- size += 2 * v->shader->num_ubos;
+ size += 2 * const_state->num_ubos;
unsigned sizedwords = (4 * packets) + size;
ubo_state->cmdstream_size = sizedwords * 4;