* IN THE SOFTWARE.
*/
+#include "main/mtypes.h"
#include "nir.h"
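+/**
+ * Set the bits corresponding to slots [offset, offset + len) of the given
+ * input/output variable.  Generic per-patch varyings are tracked in the
+ * separate patch_* bitfields, indexed relative to VARYING_SLOT_PATCH0;
+ * everything else lands in the regular slot-indexed masks.
+ */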
+static void
+set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len)
+{
+ for (int i = 0; i < len; i++) {
+ assert(var->data.location != -1);
+
+ int idx = var->data.location + offset + i;
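+ /* Tess levels and bounding box are per-patch but have dedicated varying
+ * slots, so they are tracked in the regular bitfields rather than the
+ * patch-relative ones.
+ */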
+ bool is_patch_generic = var->data.patch &&
+ idx != VARYING_SLOT_TESS_LEVEL_INNER &&
+ idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
+ idx != VARYING_SLOT_BOUNDING_BOX0 &&
+ idx != VARYING_SLOT_BOUNDING_BOX1;
+ uint64_t bitfield;
+
+ if (is_patch_generic) {
+ assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
+ bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
+ } else {
+ assert(idx < VARYING_SLOT_MAX);
+ bitfield = BITFIELD64_BIT(idx);
+ }
+
+ if (var->data.mode == nir_var_shader_in) {
+ if (is_patch_generic)
+ shader->info->patch_inputs_read |= bitfield;
+ else
+ shader->info->inputs_read |= bitfield;
+
+ /* double_inputs_read is only set for vertex inputs */
+ if (shader->stage == MESA_SHADER_VERTEX &&
+ glsl_type_is_dual_slot(glsl_without_array(var->type)))
+ shader->info->double_inputs_read |= bitfield;
+
+ if (shader->stage == MESA_SHADER_FRAGMENT) {
+ shader->info->fs.uses_sample_qualifier |= var->data.sample;
+ }
+ } else {
+ assert(var->data.mode == nir_var_shader_out);
+ if (is_patch_generic) {
+ shader->info->patch_outputs_written |= bitfield;
+ } else {
+ shader->info->outputs_written |= bitfield;
+ }
+
+ if (var->data.fb_fetch_output)
+ shader->info->outputs_read |= bitfield;
+ }
+ }
+}
+
+/**
+ * Mark an entire variable as used. Caller must ensure that the variable
+ * represents a shader input or output.
+ */
+static void
+mark_whole_variable(nir_shader *shader, nir_variable *var)
+{
+ const struct glsl_type *type = var->type;
+ bool is_vertex_input = false;
+
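+ /* Per-vertex I/O (e.g. geometry and tessellation stage inputs) carries an
+ * extra outer per-vertex array level; strip it so we count the slots used
+ * by a single vertex.
+ */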
+ if (nir_is_per_vertex_io(var, shader->stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ if (shader->stage == MESA_SHADER_VERTEX &&
+ var->data.mode == nir_var_shader_in)
+ is_vertex_input = true;
+
+ set_io_mask(shader, var, 0,
+ glsl_count_attribute_slots(type, is_vertex_input));
+}
+
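+/**
+ * Walk the deref chain and add up a constant slot offset for the access.
+ * Returns -1 (i.e. ~0u, as the return type is unsigned) if any array index
+ * is indirect and the offset cannot be determined at compile time.
+ */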
+static unsigned
+get_io_offset(nir_deref_var *deref, bool is_vertex_input)
+{
+ unsigned offset = 0;
+
+ nir_deref *tail = &deref->deref;
+ while (tail->child != NULL) {
+ tail = tail->child;
+
+ if (tail->deref_type == nir_deref_type_array) {
+ nir_deref_array *deref_array = nir_deref_as_array(tail);
+
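+ /* An indirect index could hit any element, so give up and let the
+ * caller mark the whole variable as used.
+ */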
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ return -1;
+ }
+
+ offset += glsl_count_attribute_slots(tail->type, is_vertex_input) *
+ deref_array->base_offset;
+ }
+ /* TODO: we can get the offset for structs here; see nir_lower_io() */
+ }
+
+ return offset;
+}
+
+/**
+ * Try to mark a portion of the given varying as used. Caller must ensure
+ * that the variable represents a shader input or output.
+ *
+ * If the index can't be interpreted as a constant, or some other problem
+ * occurs, then nothing will be marked and false will be returned.
+ */
+static bool
+try_mask_partial_io(nir_shader *shader, nir_deref_var *deref)
+{
+ nir_variable *var = deref->var;
+ const struct glsl_type *type = var->type;
+
+ if (nir_is_per_vertex_io(var, shader->stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ /* The code below only handles:
+ *
+ * - Indexing into matrices
+ * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
+ *
+ * For now, we just give up if we see varying structs or arrays of structs
+ * here, marking the entire variable as used.
+ */
+ if (!(glsl_type_is_matrix(type) ||
+ (glsl_type_is_array(type) &&
+ (glsl_type_is_numeric(glsl_without_array(type)) ||
+ glsl_type_is_boolean(glsl_without_array(type)))))) {
+
+ /* If we don't know how to handle this case, give up and let the
+ * caller mark the whole variable as used.
+ */
+ return false;
+ }
+
+ bool is_vertex_input = false;
+ if (shader->stage == MESA_SHADER_VERTEX &&
+ var->data.mode == nir_var_shader_in)
+ is_vertex_input = true;
+
+ unsigned offset = get_io_offset(deref, is_vertex_input);
+ if (offset == -1)
+ return false;
+
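+ /* Work out how many slots the variable covers: the outermost array size
+ * (flattened across arrays of arrays) times the matrix column count,
+ * times the per-element slot width.
+ */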
+ unsigned num_elems;
+ unsigned elem_width = 1;
+ unsigned mat_cols = 1;
+ if (glsl_type_is_array(type)) {
+ num_elems = glsl_get_aoa_size(type);
+ if (glsl_type_is_matrix(glsl_without_array(type)))
+ mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
+ } else {
+ num_elems = glsl_get_matrix_columns(type);
+ }
+
+ /* double the element width for double types that take two slots */
+ if (!is_vertex_input &&
+ glsl_type_is_dual_slot(glsl_without_array(type))) {
+ elem_width *= 2;
+ }
+
+ if (offset >= num_elems * elem_width * mat_cols) {
+ /* Constant index outside the bounds of the matrix/array. This could
+ * arise as a result of constant folding of a legal GLSL program.
+ *
+ * Even though the spec says that indexing outside the bounds of a
+ * matrix/array results in undefined behaviour, we don't want to pass
+ * out-of-range values to set_io_mask() (since this could result in
+ * slots that don't exist being marked as used), so just let the caller
+ * mark the whole variable as used.
+ */
+ return false;
+ }
+
+ set_io_mask(shader, var, offset, elem_width);
+ return true;
+}
+
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
{
shader->info->fs.uses_discard = true;
break;
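+
+ /* I/O is gathered from the variable access intrinsics themselves: try to
+ * mark just the slots a constant-indexed access touches, and fall back to
+ * marking the whole variable for indirects and anything
+ * try_mask_partial_io() does not handle.
+ */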
+ case nir_intrinsic_interp_var_at_centroid:
+ case nir_intrinsic_interp_var_at_sample:
+ case nir_intrinsic_interp_var_at_offset:
+ case nir_intrinsic_load_var:
+ case nir_intrinsic_store_var:
+ if (instr->variables[0]->var->data.mode == nir_var_shader_in ||
+ instr->variables[0]->var->data.mode == nir_var_shader_out) {
+ if (!try_mask_partial_io(shader, instr->variables[0]))
+ mark_whole_variable(shader, instr->variables[0]->var);
+ }
+ break;
+
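+ /* These intrinsics map onto gl_system_value enums via
+ * nir_system_value_from_intrinsic() and are simply recorded in the
+ * system_values_read bitmask.
+ */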
+ case nir_intrinsic_load_draw_id:
case nir_intrinsic_load_front_face:
case nir_intrinsic_load_vertex_id:
case nir_intrinsic_load_vertex_id_zero_base:
case nir_intrinsic_load_base_vertex:
+ case nir_intrinsic_load_base_instance:
case nir_intrinsic_load_instance_id:
case nir_intrinsic_load_sample_id:
case nir_intrinsic_load_sample_pos:
case nir_intrinsic_load_local_invocation_index:
case nir_intrinsic_load_work_group_id:
case nir_intrinsic_load_num_work_groups:
+ case nir_intrinsic_load_tess_coord:
+ case nir_intrinsic_load_tess_level_outer:
+ case nir_intrinsic_load_tess_level_inner:
shader->info->system_values_read |=
(1 << nir_system_value_from_intrinsic(instr->intrinsic));
break;
}
}
-/**
- * Returns the bits in the inputs_read, outputs_written, or
- * system_values_read bitfield corresponding to this variable.
- */
-static inline uint64_t
-get_io_mask(nir_variable *var, gl_shader_stage stage)
-{
- assert(var->data.mode == nir_var_shader_in ||
- var->data.mode == nir_var_shader_out ||
- var->data.mode == nir_var_system_value);
- assert(var->data.location >= 0);
-
- const struct glsl_type *var_type = var->type;
- if (stage == MESA_SHADER_GEOMETRY && var->data.mode == nir_var_shader_in) {
- /* Most geometry shader inputs are per-vertex arrays */
- if (var->data.location >= VARYING_SLOT_VAR0)
- assert(glsl_type_is_array(var_type));
-
- if (glsl_type_is_array(var_type))
- var_type = glsl_get_array_element(var_type);
- }
-
- bool is_vertex_input = (var->data.mode == nir_var_shader_in &&
- stage == MESA_SHADER_VERTEX);
- unsigned slots = glsl_count_attribute_slots(var_type, is_vertex_input);
- return ((1ull << slots) - 1) << var->data.location;
-}
-
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
- /* This pass does not yet support tessellation shaders */
- assert(shader->stage == MESA_SHADER_VERTEX ||
- shader->stage == MESA_SHADER_GEOMETRY ||
- shader->stage == MESA_SHADER_FRAGMENT ||
- shader->stage == MESA_SHADER_COMPUTE);
-
- bool uses_sample_qualifier = false;
- shader->info->inputs_read = 0;
- foreach_list_typed(nir_variable, var, node, &shader->inputs) {
- shader->info->inputs_read |= get_io_mask(var, shader->stage);
- uses_sample_qualifier |= var->data.sample;
- }
-
- if (shader->stage == MESA_SHADER_FRAGMENT)
- shader->info->fs.uses_sample_qualifier = uses_sample_qualifier;
-
- /* TODO: Some day we may need to add stream support to NIR */
- shader->info->outputs_written = 0;
- foreach_list_typed(nir_variable, var, node, &shader->outputs)
- shader->info->outputs_written |= get_io_mask(var, shader->stage);
-
- shader->info->system_values_read = 0;
- foreach_list_typed(nir_variable, var, node, &shader->system_values)
- shader->info->system_values_read |= get_io_mask(var, shader->stage);
-
shader->info->num_textures = 0;
shader->info->num_images = 0;
nir_foreach_variable(var, &shader->uniforms) {
}
}
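+ /* The remaining info is gathered from the shader's instructions, so clear
+ * out any stale state before walking the blocks below.
+ */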
+ shader->info->inputs_read = 0;
+ shader->info->outputs_written = 0;
+ shader->info->outputs_read = 0;
+ shader->info->double_inputs_read = 0;
+ shader->info->patch_inputs_read = 0;
+ shader->info->patch_outputs_written = 0;
+ shader->info->system_values_read = 0;
+ if (shader->stage == MESA_SHADER_FRAGMENT) {
+ shader->info->fs.uses_sample_qualifier = false;
+ }
nir_foreach_block(block, entrypoint) {
gather_info_block(block, shader);
}