* Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
* intrinsics into something amenable to the V3D architecture.
*
- * Currently, it splits VS inputs and uniforms into scalars, drops any
- * non-position outputs in coordinate shaders, and fixes up the addressing on
- * indirect uniform loads. FS input and VS output scalarization is handled by
- * nir_lower_io_to_scalar().
+ * Most of the work is turning the VS's store_output intrinsics from working
+ * on a base representing the gallium-level vec4 driver_location to an offset
+ * within the VPM, and emitting the header that's read by the fixed function
+ * hardware between the VS and FS.
+ *
+ * We also adjust the offsets on uniform loads to be in bytes, since that's
+ * what we need for indirect addressing with general TMU access.
*/
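+
+/* State for the lowering pass: the slot offsets of each field within the
+ * VS's VPM output segment (-1 when the field isn't present for this shader).
+ * varyings_stored tracks which of the used outputs have been written so the
+ * rest can be zero-filled at the end, and pos[] saves the output position
+ * components for computing the fixed-function header values.
+ */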
+struct v3d_nir_lower_io_state {
+ int pos_vpm_offset;
+ int vp_vpm_offset;
+ int zs_vpm_offset;
+ int rcp_wc_vpm_offset;
+ int psiz_vpm_offset;
+ int varyings_vpm_offset;
+
+ BITSET_WORD varyings_stored[BITSET_WORDS(V3D_MAX_ANY_STAGE_INPUTS)];
+
+ nir_ssa_def *pos[4];
+};
+
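+/* Emits a scalar store_output intrinsic writing chan to the VPM slot given
+ * by base.
+ */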
static void
-replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
- nir_ssa_def **comps)
+v3d_nir_store_output(nir_builder *b, int base, nir_ssa_def *chan)
{
+ nir_intrinsic_instr *intr =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
+ intr->num_components = 1;
- /* Batch things back together into a vector. This will get split by
- * the later ALU scalarization pass.
- */
- nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
+ intr->src[0] = nir_src_for_ssa(chan);
+ intr->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
- /* Replace the old intrinsic with a reference to our reconstructed
- * vector.
- */
- nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
- nir_instr_remove(&intr->instr);
+ nir_intrinsic_set_base(intr, base);
+ nir_intrinsic_set_write_mask(intr, 0x1);
+ nir_intrinsic_set_component(intr, 0);
+
+ nir_builder_instr_insert(b, &intr->instr);
}
+/* Convert the uniform offset to bytes. If it happens to be a constant,
+ * constant-folding will clean up the shift for us.
+ */
static void
-v3d_nir_lower_output(struct v3d_compile *c, nir_builder *b,
- nir_intrinsic_instr *intr)
+v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
+ nir_intrinsic_instr *intr)
{
- nir_variable *output_var = NULL;
- nir_foreach_variable(var, &c->s->outputs) {
- if (var->data.driver_location == nir_intrinsic_base(intr)) {
- output_var = var;
- break;
+ b->cursor = nir_before_instr(&intr->instr);
+
+ nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
+
+ nir_instr_rewrite_src(&intr->instr,
+ &intr->src[0],
+ nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
+ nir_imm_int(b, 4))));
+}
+
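+/* Returns the position of this channel of var within the varyings portion of
+ * the VPM output segment, or -1 if the channel isn't in the key's
+ * used_outputs list.
+ */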
+static int
+v3d_varying_slot_vpm_offset(struct v3d_compile *c, nir_variable *var, int chan)
+{
+ int component = var->data.location_frac + chan;
+
+ for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
+ struct v3d_varying_slot slot = c->vs_key->used_outputs[i];
+
+ if (v3d_slot_get_slot(slot) == var->data.location &&
+ v3d_slot_get_component(slot) == component) {
+ return i;
}
}
- assert(output_var);
-
- if (c->vs_key) {
- int slot = output_var->data.location;
- bool used = false;
-
- switch (slot) {
- case VARYING_SLOT_PSIZ:
- case VARYING_SLOT_POS:
- used = true;
- break;
-
- default:
- for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
- if (v3d_slot_get_slot(c->vs_key->fs_inputs[i]) == slot) {
- used = true;
- break;
- }
- }
- break;
- }
- if (!used)
- nir_instr_remove(&intr->instr);
- }
+ return -1;
}
+/* Lowers a store_output(gallium driver location) to a series of store_outputs
+ * with a driver_location equal to the offset in the VPM.
+ */
static void
-v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
- nir_intrinsic_instr *intr)
+v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
+ nir_intrinsic_instr *intr,
+ struct v3d_nir_lower_io_state *state)
{
b->cursor = nir_before_instr(&intr->instr);
- /* Generate scalar loads equivalent to the original vector. */
- nir_ssa_def *dests[4];
- for (unsigned i = 0; i < intr->num_components; i++) {
- nir_intrinsic_instr *intr_comp =
- nir_intrinsic_instr_create(c->s, intr->intrinsic);
- intr_comp->num_components = 1;
- nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
-
- /* Convert the uniform offset to bytes. If it happens
- * to be a constant, constant-folding will clean up
- * the shift for us.
- */
- nir_intrinsic_set_base(intr_comp,
- nir_intrinsic_base(intr) * 16 +
- i * 4);
-
- intr_comp->src[0] =
- nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
- nir_imm_int(b, 4)));
-
- dests[i] = &intr_comp->dest.ssa;
-
- nir_builder_instr_insert(b, &intr_comp->instr);
+ int start_comp = nir_intrinsic_component(intr);
+ nir_ssa_def *src = nir_ssa_for_src(b, intr->src[0],
+ intr->num_components);
+
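+ /* Find the output variable this store writes, matching on its
+  * driver_location and the variable's component range.
+  */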
+ nir_variable *var = NULL;
+ nir_foreach_variable(scan_var, &c->s->outputs) {
+ if (scan_var->data.driver_location != nir_intrinsic_base(intr) ||
+ start_comp < scan_var->data.location_frac ||
+ start_comp >= scan_var->data.location_frac +
+ glsl_get_components(scan_var->type)) {
+ continue;
+ }
+ var = scan_var;
+ }
+ assert(var);
+
+ /* Save off the components of the position for the setup of VPM inputs
+ * read by fixed function HW.
+ */
+ if (var->data.location == VARYING_SLOT_POS) {
+ for (int i = 0; i < intr->num_components; i++) {
+ state->pos[start_comp + i] = nir_channel(b, src, i);
+ }
+ }
+
+ /* Just store psiz to its position in the FF header right now. */
+ if (var->data.location == VARYING_SLOT_PSIZ &&
+ state->psiz_vpm_offset != -1) {
+ v3d_nir_store_output(b, state->psiz_vpm_offset, src);
}
- replace_intrinsic_with_vec(b, intr, dests);
+ /* Scalarize outputs if it hasn't happened already, since we want to
+ * schedule each VPM write individually. We can skip any output
+ * components not read by the FS.
+ */
+ for (int i = 0; i < intr->num_components; i++) {
+ int vpm_offset =
+ v3d_varying_slot_vpm_offset(c, var,
+ i +
+ start_comp -
+ var->data.location_frac);
+
+ if (vpm_offset == -1)
+ continue;
+
+ BITSET_SET(state->varyings_stored, vpm_offset);
+
+ v3d_nir_store_output(b, state->varyings_vpm_offset + vpm_offset,
+ nir_channel(b, src, i));
+ }
+
+ nir_instr_remove(&intr->instr);
}
static void
v3d_nir_lower_io_instr(struct v3d_compile *c, nir_builder *b,
- struct nir_instr *instr)
+ struct nir_instr *instr,
+ struct v3d_nir_lower_io_state *state)
{
if (instr->type != nir_instr_type_intrinsic)
return;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
- case nir_intrinsic_load_input:
+ case nir_intrinsic_load_uniform:
+ v3d_nir_lower_uniform(c, b, intr);
break;
case nir_intrinsic_store_output:
- v3d_nir_lower_output(c, b, intr);
- break;
-
- case nir_intrinsic_load_uniform:
- v3d_nir_lower_uniform(c, b, intr);
+ if (c->s->info.stage == MESA_SHADER_VERTEX)
+ v3d_nir_lower_vpm_output(c, b, intr, state);
break;
- case nir_intrinsic_load_user_clip_plane:
default:
break;
}
}
-static bool
-v3d_nir_lower_io_impl(struct v3d_compile *c, nir_function_impl *impl)
+/* Remap the output var's .driver_location. This is purely for
+ * nir_print_shader() so that store_output can map back to a variable name.
+ */
+static void
+v3d_nir_lower_io_update_output_var_base(struct v3d_compile *c,
+ struct v3d_nir_lower_io_state *state)
+{
+ nir_foreach_variable_safe(var, &c->s->outputs) {
+ if (var->data.location == VARYING_SLOT_POS &&
+ state->pos_vpm_offset != -1) {
+ var->data.driver_location = state->pos_vpm_offset;
+ continue;
+ }
+
+ if (var->data.location == VARYING_SLOT_PSIZ &&
+ state->psiz_vpm_offset != -1) {
+ var->data.driver_location = state->psiz_vpm_offset;
+ continue;
+ }
+
+ int vpm_offset = v3d_varying_slot_vpm_offset(c, var, 0);
+ if (vpm_offset != -1) {
+ var->data.driver_location =
+ state->varyings_vpm_offset + vpm_offset;
+ } else {
+ /* If we couldn't find a mapping for the var, delete
+ * it so that its old .driver_location doesn't confuse
+ * nir_print_shader().
+ */
+ exec_node_remove(&var->node);
+ }
+ }
+}
+
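+/* Computes the layout of the VS's VPM output segment: the full output
+ * position for coordinate shaders, then the fixed-function header
+ * (screen-space XY, plus Zs and 1/Wc for non-coordinate shaders, and point
+ * size when per-vertex point size is enabled), then the varyings requested
+ * by the FS.
+ */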
+static void
+v3d_nir_setup_vpm_layout(struct v3d_compile *c,
+ struct v3d_nir_lower_io_state *state)
+{
+ uint32_t vpm_offset = 0;
+
+ if (c->vs_key->is_coord) {
+ state->pos_vpm_offset = vpm_offset;
+ vpm_offset += 4;
+ } else {
+ state->pos_vpm_offset = -1;
+ }
+
+ state->vp_vpm_offset = vpm_offset;
+ vpm_offset += 2;
+
+ if (!c->vs_key->is_coord) {
+ state->zs_vpm_offset = vpm_offset++;
+ state->rcp_wc_vpm_offset = vpm_offset++;
+ } else {
+ state->zs_vpm_offset = -1;
+ state->rcp_wc_vpm_offset = -1;
+ }
+
+ if (c->vs_key->per_vertex_point_size)
+ state->psiz_vpm_offset = vpm_offset++;
+ else
+ state->psiz_vpm_offset = -1;
+
+ state->varyings_vpm_offset = vpm_offset;
+
+ c->vpm_output_size = vpm_offset + c->vs_key->num_used_outputs;
+}
+
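+/* Emits the stores of the position-derived values read by the fixed-function
+ * hardware, using the gl_Position components saved in state->pos, and
+ * zero-fills any used varyings the VS didn't write.
+ */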
+static void
+v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
+ struct v3d_nir_lower_io_state *state)
{
- nir_builder b;
- nir_builder_init(&b, impl);
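+ /* Fill in undefs for any position channels the VS didn't write. */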
+ for (int i = 0; i < 4; i++) {
+ if (!state->pos[i])
+ state->pos[i] = nir_ssa_undef(b, 1, 32);
+ }
+
+ nir_ssa_def *rcp_wc = nir_frcp(b, state->pos[3]);
+
+ if (state->pos_vpm_offset != -1) {
+ for (int i = 0; i < 4; i++) {
+ v3d_nir_store_output(b, state->pos_vpm_offset + i,
+ state->pos[i]);
+ }
+ }
+
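+ /* Compute the integer screen-space X/Y for the FF header: the output
+  * X/Y scaled by the viewport, divided by Wc, and rounded.
+  */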
+ for (int i = 0; i < 2; i++) {
+ nir_ssa_def *pos;
+ nir_ssa_def *scale;
+ pos = state->pos[i];
+ if (i == 0)
+ scale = nir_load_viewport_x_scale(b);
+ else
+ scale = nir_load_viewport_y_scale(b);
+ pos = nir_fmul(b, pos, scale);
+ pos = nir_fmul(b, pos, rcp_wc);
+ pos = nir_f2i32(b, nir_fround_even(b, pos));
+ v3d_nir_store_output(b, state->vp_vpm_offset + i,
+ pos);
+ }
- nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(instr, block)
- v3d_nir_lower_io_instr(c, &b, instr);
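+ /* Zs for the FF header: Zc/Wc remapped by the viewport's depth scale
+  * and offset.
+  */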
+ if (state->zs_vpm_offset != -1) {
+ nir_ssa_def *z = state->pos[2];
+ z = nir_fmul(b, z, nir_load_viewport_z_scale(b));
+ z = nir_fmul(b, z, rcp_wc);
+ z = nir_fadd(b, z, nir_load_viewport_z_offset(b));
+ v3d_nir_store_output(b, state->zs_vpm_offset, z);
}
- nir_metadata_preserve(impl, nir_metadata_block_index |
- nir_metadata_dominance);
+ if (state->rcp_wc_vpm_offset != -1)
+ v3d_nir_store_output(b, state->rcp_wc_vpm_offset, rcp_wc);
- return true;
+ /* Store 0 to varyings requested by the FS but not stored in the VS.
+ * This should be undefined behavior, but glsl-routing seems to rely
+ * on it.
+ */
+ for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
+ if (!BITSET_TEST(state->varyings_stored, i)) {
+ v3d_nir_store_output(b, state->varyings_vpm_offset + i,
+ nir_imm_int(b, 0));
+ }
+ }
}
void
v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c)
{
+ struct v3d_nir_lower_io_state state = { 0 };
+
+ /* Set up the layout of the VPM outputs. */
+ if (s->info.stage == MESA_SHADER_VERTEX)
+ v3d_nir_setup_vpm_layout(c, &state);
+
nir_foreach_function(function, s) {
- if (function->impl)
- v3d_nir_lower_io_impl(c, function->impl);
+ if (function->impl) {
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block)
+ v3d_nir_lower_io_instr(c, &b, instr,
+ &state);
+ }
+
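+ /* For the VS, emit the FF header stores at the very end of the
+  * shader, once all the output stores have been lowered.
+  */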
+ nir_block *last = nir_impl_last_block(function->impl);
+ b.cursor = nir_after_block(last);
+ if (s->info.stage == MESA_SHADER_VERTEX)
+ v3d_nir_emit_ff_vpm_outputs(c, &b, &state);
+
+ nir_metadata_preserve(function->impl,
+ nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
}
+
+ if (s->info.stage == MESA_SHADER_VERTEX)
+ v3d_nir_lower_io_update_output_var_base(c, &state);
}