dst = NULL;
}
+ const unsigned primitive_param = ctx->so->shader->const_state.offsets.primitive_param * 4;
+ const unsigned primitive_map = ctx->so->shader->const_state.offsets.primitive_map * 4;
+
switch (intr->intrinsic) {
case nir_intrinsic_load_uniform:
idx = nir_intrinsic_base(intr);
ctx->so->shader->ubo_state.size / 16);
}
break;
+
+ case nir_intrinsic_load_vs_primitive_stride_ir3:
+ dst[0] = create_uniform(b, primitive_param + 0);
+ break;
+ case nir_intrinsic_load_vs_vertex_stride_ir3:
+ dst[0] = create_uniform(b, primitive_param + 1);
+ break;
+ case nir_intrinsic_load_primitive_location_ir3:
+ idx = nir_intrinsic_driver_location(intr);
+ dst[0] = create_uniform(b, primitive_map + idx);
+ break;
+
case nir_intrinsic_load_ubo:
emit_intrinsic_load_ubo(ctx, intr, dst);
break;
constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
}
+ switch (shader->type) {
+ case MESA_SHADER_VERTEX:
+ const_state->offsets.primitive_param = constoff;
+ constoff += 1;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ const_state->offsets.primitive_param = constoff;
+ const_state->offsets.primitive_map = constoff + 1;
+ constoff += 1 + DIV_ROUND_UP(nir->num_inputs, 4);
+ break;
+ default:
+ break;
+ }
+
const_state->offsets.immediate = constoff;
}
unsigned image_dims;
unsigned driver_param;
unsigned tfbo;
+ unsigned primitive_param;
+ unsigned primitive_map;
unsigned immediate;
} offsets;
}
}
+/* Emit the shared "primitive param" constants consumed by the VS and GS
+ * when a geometry shader is active.  params is one vec4:
+ *   .x = VS primitive stride (per-vertex output size * vertices-in,
+ *        presumably in bytes given the *4 — confirm against shader use)
+ *   .y = VS vertex stride (per-vertex output size, same *4 scaling)
+ *   .z/.w = unused (zero)
+ * The same values are uploaded to both stages at each stage's own
+ * primitive_param const offset, via a streaming ringbuffer attached to
+ * the emit state as FD6_GROUP_PRIMITIVE_PARAMS.
+ */
+static void
+fd6_emit_tess_const(struct fd6_emit *emit)
+{
+ struct fd_context *ctx = emit->ctx;
+ /* const_state offsets are in vec4 units; fd6_emit_const below takes a
+ * component (dword) offset, hence the regid * 4 at the call sites.
+ */
+ const unsigned vs_regid = emit->vs->shader->const_state.offsets.primitive_param;
+ const unsigned gs_regid = emit->gs->shader->const_state.offsets.primitive_param;
+ /* number of vertices per input primitive, from the GS's NIR info */
+ uint32_t num_vertices = emit->gs->shader->nir->info.gs.vertices_in;
+
+ uint32_t params[4] = {
+ emit->vs->shader->output_size * num_vertices * 4, /* vs primitive stride */
+ emit->vs->shader->output_size * 4, /* vs vertex stride */
+ 0, 0,
+ };
+
+ /* small streaming ringbuffer to hold the const uploads for this draw */
+ struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
+ ctx->batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);
+
+ fd6_emit_const(constobj, emit->vs->type, vs_regid * 4, 0, ARRAY_SIZE(params), params, NULL);
+ fd6_emit_const(constobj, emit->gs->type, gs_regid * 4, 0, ARRAY_SIZE(params), params, NULL);
+
+ /* hand ownership of constobj to the emit group machinery */
+ fd6_emit_take_group(emit, constobj, FD6_GROUP_PRIMITIVE_PARAMS, 0x7);
+}
static void
fd6_emit_consts(struct fd6_emit *emit, const struct ir3_shader_variant *v,
fd6_emit_consts(emit, gs, PIPE_SHADER_GEOMETRY, FD6_GROUP_GS_CONST, 0x7);
fd6_emit_consts(emit, fs, PIPE_SHADER_FRAGMENT, FD6_GROUP_FS_CONST, 0x6);
+ if (emit->key.key.has_gs)
+ fd6_emit_tess_const(emit);
+
/* if driver-params are needed, emit each time: */
if (ir3_needs_vs_driver_params(vs)) {
struct fd_ringbuffer *dpconstobj = fd_submit_new_ringbuffer(
FD6_GROUP_GS_CONST,
FD6_GROUP_FS_CONST,
FD6_GROUP_VS_DRIVER_PARAMS,
+ FD6_GROUP_PRIMITIVE_PARAMS,
FD6_GROUP_VS_TEX,
FD6_GROUP_HS_TEX,
FD6_GROUP_DS_TEX,
A6XX_VPC_PACK_PSIZELOC(psize_loc) |
A6XX_VPC_PACK_STRIDE_IN_VPC(l.max_loc));
+ if (gs) {
+ ir3_emit_immediates(screen, gs, ring);
+ ir3_emit_link_map(screen, vs, gs, ring);
+ }
+
if (!binning_pass) {
/* figure out VARYING_INTERP / VARYING_PS_REPL register values: */
for (j = -1; (j = ir3_next_varying(fs, j)) < (int)fs->inputs_count; ) {
ir3_emit_immediates(screen, ds, ring);
}
- if (gs) {
- ir3_emit_immediates(screen, gs, ring);
- }
-
if (!binning_pass)
ir3_emit_immediates(screen, fs, ring);
}
}
}
+/* Build the producer->consumer "link map" between geometry stages:
+ * for each consumer (e.g. GS) input variable, find the producer
+ * (e.g. VS) output with the same location and record the producer's
+ * output component offset (output_loc is in vec4 slots, scaled *4 to
+ * components) at the consumer's driver_location index in locs[].
+ *
+ * Returns the number of consumer locations filled in (highest
+ * driver_location + 1).  The nested scan is O(inputs * outputs), which
+ * is fine for the small varying counts involved.
+ */
+static uint32_t
+link_geometry_stages(const struct ir3_shader_variant *producer,
+ const struct ir3_shader_variant *consumer,
+ uint32_t *locs)
+{
+ uint32_t num_loc = 0;
+
+ nir_foreach_variable(in_var, &consumer->shader->nir->inputs) {
+ nir_foreach_variable(out_var, &producer->shader->nir->outputs) {
+ if (in_var->data.location == out_var->data.location) {
+ locs[in_var->data.driver_location] =
+ producer->shader->output_loc[out_var->data.driver_location] * 4;
+
+ /* inputs appear to be visited in non-decreasing
+ * driver_location order — the assert checks num_loc
+ * only ever grows; TODO confirm NIR guarantees this
+ * ordering.
+ */
+ debug_assert(num_loc <= in_var->data.driver_location + 1);
+ num_loc = in_var->data.driver_location + 1;
+ }
+ }
+ }
+
+ return num_loc;
+}
+
+/* Upload the stage link map (producer output offsets, indexed by the
+ * consumer's input driver_location) into the consumer's constant file
+ * at its primitive_map offset.  Sizes are computed in vec4 units,
+ * clamped to the variant's constlen so we never write constants the
+ * shader doesn't use, then converted to component (dword) units for
+ * emit_const.
+ */
+void
+ir3_emit_link_map(struct fd_screen *screen,
+ const struct ir3_shader_variant *producer,
+ const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
+{
+ const struct ir3_const_state *const_state = &v->shader->const_state;
+ /* base is in vec4 units at this point */
+ uint32_t base = const_state->offsets.primitive_map;
+ uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
+
+ num_loc = link_geometry_stages(producer, v, patch_locs);
+
+ /* round component count up to whole vec4s */
+ int size = DIV_ROUND_UP(num_loc, 4);
+
+ /* truncate size to avoid writing constants that shader
+ * does not use:
+ */
+ size = MIN2(size + base, v->constlen) - base;
+
+ /* convert out of vec4: */
+ base *= 4;
+ size *= 4;
+
+ /* size can go non-positive after the constlen clamp */
+ if (size > 0)
+ emit_const(screen, ring, v, base, 0, size, patch_locs, NULL);
+}
+
/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si);
void ir3_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring);
+void ir3_emit_link_map(struct fd_screen *screen,
+ const struct ir3_shader_variant *producer,
+ const struct ir3_shader_variant *v, struct fd_ringbuffer *ring);
static inline bool
ir3_needs_vs_driver_params(const struct ir3_shader_variant *v)