info->num_tokens = 2; /* indicate that the shader is non-empty */
info->num_instructions = 2;
- info->num_inputs = nir->num_inputs;
- info->num_outputs = nir->num_outputs;
-
if (nir->info.stage == MESA_SHADER_GEOMETRY) {
info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
}
i = 0;
+ uint64_t processed_inputs = 0;
+ unsigned num_inputs = 0;
nir_foreach_variable(variable, &nir->inputs) {
unsigned semantic_name, semantic_index;
unsigned attrib_count = glsl_count_attribute_slots(variable->type,
nir->info.stage == MESA_SHADER_VERTEX);
if (variable->data.pixel_center_integer)
info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
+
+ num_inputs++;
continue;
}
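+
+ /* Use driver_location as the slot index, and track already-processed
+ * slots in a 64-bit mask so a slot shared by several variables is
+ * only counted once.
+ */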
+ i = variable->data.driver_location;
+ if (processed_inputs & ((uint64_t)1 << i))
+ continue;
+
+ processed_inputs |= ((uint64_t)1 << i);
+ num_inputs++;
+
tgsi_get_gl_varying_semantic(variable->data.location, true,
&semantic_name, &semantic_index);
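+ /* colors_read uses one nibble per color: the low nibble tracks
+ * COL0.xyzw and the high nibble COL1.xyzw.
+ */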
if (variable->data.location == VARYING_SLOT_COL0)
info->colors_read |= 0x0f;
else if (variable->data.location == VARYING_SLOT_COL1)
info->colors_read |= 0xf0;
-
- i++;
}
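+ /* The scan above skips vertex shader inputs (they have no varying
+ * semantics), so for vertex shaders fall back to the input count NIR
+ * already gathered.
+ */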
+ if (nir->info.stage != MESA_SHADER_VERTEX)
+ info->num_inputs = num_inputs;
+ else
+ info->num_inputs = nir->num_inputs;
+
i = 0;
+ uint64_t processed_outputs = 0;
+ unsigned num_outputs = 0;
nir_foreach_variable(variable, &nir->outputs) {
unsigned semantic_name, semantic_index;
tgsi_get_gl_varying_semantic(variable->data.location, true,
&semantic_name, &semantic_index);
}
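+ /* Same de-duplication as for the inputs above: count each
+ * driver_location slot only once.
+ */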
+ i = variable->data.driver_location;
+ if (processed_outputs & ((uint64_t)1 << i))
+ continue;
+
+ processed_outputs |= ((uint64_t)1 << i);
+ num_outputs++;
+
info->output_semantic_name[i] = semantic_name;
info->output_semantic_index[i] = semantic_index;
info->output_usagemask[i] = TGSI_WRITEMASK_XYZW;
switch (semantic_name) {
case TGSI_SEMANTIC_POSITION:
info->writes_position = true;
break;
}
-
- i++;
}
+ info->num_outputs = num_outputs;
+
nir_foreach_variable(variable, &nir->uniforms) {
const struct glsl_type *type = variable->type;
enum glsl_base_type base_type =
glsl_get_base_type(glsl_without_array(type));
NIR_PASS_V(nir, nir_lower_var_copies);
}
+static void
+st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
+{
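+ /* Split arrays of varyings into individual elements first, so the
+ * passes below can remove elements that are never used.
+ */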
+ nir_lower_io_arrays_to_elements(*producer, *consumer);
+
+ NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
+ NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+
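+ /* nir_remove_unused_varyings() reports whether it made progress;
+ * the extra clean-up below is only needed when varyings were
+ * actually demoted.
+ */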
+ if (nir_remove_unused_varyings(*producer, *consumer)) {
+ NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
+ NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+
+ /* The backend might not be able to handle indirects on
+ * temporaries so we need to lower indirects on any of the
+ * varyings we have demoted here.
+ *
+ * TODO: radeonsi shouldn't need to do this, however LLVM isn't
+ * currently smart enough to handle indirects without excess
+ * spilling, which causes the GPU to hang.
+ *
+ * See the following thread for more details of the problem:
+ * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
+ */
+ nir_variable_mode indirect_mask = nir_var_local;
+
+ NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
+ NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);
+
+ st_nir_opts(*producer);
+ st_nir_opts(*consumer);
+ }
+}
+
extern "C" {
bool
st_link_nir(struct gl_context *ctx,
struct gl_shader_program *shader_program)
{
struct st_context *st = st_context(ctx);
+ /* Determine first and last stage. */
+ unsigned first = MESA_SHADER_STAGES;
+ unsigned last = 0;
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (!shader_program->_LinkedShaders[i])
+ continue;
+ if (first == MESA_SHADER_STAGES)
+ first = i;
+ last = i;
+ }
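+
+ /* first/last now bracket the linked stages, e.g. MESA_SHADER_VERTEX
+ * and MESA_SHADER_FRAGMENT for a program with only a VS and FS.
+ */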
+
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
if (shader == NULL)
continue;
st_nir_get_mesa_program(ctx, shader_program, shader);
+
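+ /* Scalarize only the inter-stage varyings; the first stage's inputs
+ * and the last stage's outputs form the program's external interface
+ * and are left alone.
+ */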
+ nir_variable_mode mask = (nir_variable_mode) 0;
+ if (i != first)
+ mask = (nir_variable_mode)(mask | nir_var_shader_in);
+
+ if (i != last)
+ mask = (nir_variable_mode)(mask | nir_var_shader_out);
+
+ nir_shader *nir = shader->Program->nir;
+ nir_lower_io_to_scalar_early(nir, mask);
+ st_nir_opts(nir);
+ }
+
+ /* Linking the stages in the opposite order (from fragment to vertex)
+ * ensures that inter-shader outputs written to in an earlier stage
+ * are eliminated if they are (transitively) not used in a later
+ * stage.
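+ *
+ * For example, with a VS/GS/FS program this links (GS, FS) first and
+ * then (VS, GS).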
+ */
+ int next = last;
+ for (int i = next - 1; i >= 0; i--) {
+ struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+ if (shader == NULL)
+ continue;
+
+ st_nir_link_shaders(&shader->Program->nir,
+ &shader_program->_LinkedShaders[next]->Program->nir);
+ next = i;
}
+ int prev = -1;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
if (shader == NULL)
continue;

nir_shader *nir = shader->Program->nir;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
shader->Program->info = nir->info;
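+
+ /* Pack the varyings that survived linking into as few slots as
+ * possible. Outside the compatibility profile, varyings without an
+ * explicit interpolation qualifier may be treated as smooth, which
+ * allows tighter packing.
+ */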
+ if (prev != -1) {
+ nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
+ nir, ctx->API != API_OPENGL_COMPAT);
+ }
+ prev = i;
+
st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
assert(shader->Program);