assert(var->data.location >= 0);
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
*
* Example usage is:
*
- * progress = nir_remove_unused_io_vars(producer,
- * &producer->outputs,
+ * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out,
* read, patches_read) ||
* progress;
*
* variable is used!
*/
bool
-nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
+nir_remove_unused_io_vars(nir_shader *shader,
+ nir_variable_mode mode,
uint64_t *used_by_other_stage,
uint64_t *used_by_other_stage_patches)
{
bool progress = false;
uint64_t *used;
+ assert(mode == nir_var_shader_in || mode == nir_var_shader_out);
+ struct exec_list *var_list =
+ mode == nir_var_shader_in ? &shader->inputs : &shader->outputs;
+
nir_foreach_variable_safe(var, var_list) {
if (var->data.patch)
used = used_by_other_stage_patches;
tcs_add_output_reads(producer, read, patches_read);
bool progress = false;
- progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
+ progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read,
patches_read);
- progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
+ progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written,
patches_written) || progress;
return progress;
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
continue;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, producer->info.stage)) {
+ if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (!vc_info->initialised) {
const struct glsl_type *type = in_var->type;
- if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
+ if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
+ in_var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
vc_info->is_32bit = glsl_type_is_32bit(type);
vc_info->is_patch = in_var->data.patch;
vc_info->is_intra_stage_only = false;
+ vc_info->initialised = true;
}
}
}
unsigned var_info_idx =
store_varying_info_idx[location][out_var->data.location_frac];
- if (!var_info_idx)
- continue;
+ if (!var_info_idx) {
+ /* Something went wrong, the shader interfaces didn't match, so
+ * abandon packing. This can happen for example when the
+ * inputs are scalars but the outputs are struct members.
+ */
+ *varying_comp_info_size = 0;
+ break;
+ }
struct varying_component *vc_info =
&(*varying_comp_info)[var_info_idx-1];
vc_info->is_32bit = glsl_type_is_32bit(type);
vc_info->is_patch = out_var->data.patch;
vc_info->is_intra_stage_only = true;
+ vc_info->initialised = true;
}
}
}
}
+
+ for (unsigned i = 0; i < *varying_comp_info_size; i++ ) {
+ struct varying_component *vc_info = &(*varying_comp_info)[i];
+ if (!vc_info->initialised) {
+ /* Something went wrong, the shader interfaces didn't match, so
+ * abandon packing. This can happen for example when the outputs are
+ * scalars but the inputs are struct members.
+ */
+ *varying_comp_info_size = 0;
+ break;
+ }
+ }
}
static void
bool last_partial = false;
nir_foreach_variable(var, var_list) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
unsigned var_size;
if (var->data.compact) {
+ /* If we are inside a partial compact,
+ * don't allow another compact to be in this slot
+ * if it starts at component 0.
+ */
+ if (last_partial && var->data.location_frac == 0) {
+ location++;
+ }
+
/* compact variables must be arrays of scalars */
assert(glsl_type_is_array(type));
assert(glsl_type_is_scalar(glsl_get_array_element(type)));
*size = location;
}
+/* Remap a variable's slot to a compact, zero-based index space used for
+ * the linked-IO bitmasks below.
+ *
+ * Non-patch locations pass through unchanged.  Patch locations are
+ * compacted so the special patch slots (tess levels, bounding boxes)
+ * occupy indices 0...3 and generic patch varyings start at 4, keeping
+ * the whole patch range dense inside a 64-bit mask.
+ */
+static uint64_t
+get_linked_variable_location(unsigned location, bool patch)
+{
+   if (!patch)
+      return location;
+
+   /* Reserve locations 0...3 for special patch variables
+    * like tess factors and bounding boxes, and the generic patch
+    * variables will come after them.
+    */
+   if (location >= VARYING_SLOT_PATCH0)
+      return location - VARYING_SLOT_PATCH0 + 4;
+   else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER &&
+            location <= VARYING_SLOT_BOUNDING_BOX1)
+      return location - VARYING_SLOT_TESS_LEVEL_OUTER;
+   else
+      unreachable("Unsupported variable in get_linked_variable_location.");
+}
+
+/* Build a bitmask (bit 0 = the variable's own base slot) of how many
+ * consecutive slots this variable occupies.  The caller shifts the mask
+ * into place by the variable's (remapped) location.
+ *
+ * NOTE(review): the other hunks in this patch extend the per-vertex
+ * array check with "|| var->data.per_view", but this helper only tests
+ * nir_is_per_vertex_io() — confirm per-view variables cannot reach
+ * this path, otherwise the outer array level is counted as slots.
+ */
+static uint64_t
+get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
+{
+   const struct glsl_type *type = variable->type;
+
+   /* Per-vertex IO is wrapped in an extra array level; strip it so the
+    * slot count reflects a single vertex's data.
+    */
+   if (nir_is_per_vertex_io(variable, stage)) {
+      assert(glsl_type_is_array(type));
+      type = glsl_get_array_element(type);
+   }
+
+   unsigned slots = glsl_count_attribute_slots(type, false);
+   /* Compact variables (arrays of scalars, e.g. clip/cull distances)
+    * pack 4 components per slot starting at location_frac.
+    */
+   if (variable->data.compact) {
+      unsigned component_count = variable->data.location_frac + glsl_get_length(type);
+      slots = DIV_ROUND_UP(component_count, 4);
+   }
+
+   uint64_t mask = u_bit_consecutive64(0, slots);
+   return mask;
+}
+
+/* Assign driver_locations to a producer's outputs and the matching
+ * consumer's inputs so both stages agree on a packed layout.
+ *
+ * Algorithm: build one 64-bit occupancy mask over the union of producer
+ * outputs and consumer inputs (a separate mask for patch IO), then give
+ * each variable a driver_location equal to 4 * (number of occupied slots
+ * below its own slot).  Using the union mask on both sides guarantees
+ * that a slot present in only one stage still reserves space in the
+ * other, so locations line up across the interface.
+ *
+ * NOTE(review): masks are uint64_t, so this assumes all remapped
+ * locations (see get_linked_variable_location) fit in 64 slots —
+ * confirm `mask << loc` cannot shift past bit 63.
+ *
+ * Returns the number of linked non-patch and patch IO slots.
+ */
+nir_linked_io_var_info
+nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer)
+{
+   assert(producer);
+   assert(consumer);
+
+   uint64_t producer_output_mask = 0;
+   uint64_t producer_patch_output_mask = 0;
+
+   nir_foreach_variable(variable, &producer->outputs) {
+      uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage);
+      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
+
+      if (variable->data.patch)
+         producer_patch_output_mask |= mask << loc;
+      else
+         producer_output_mask |= mask << loc;
+   }
+
+   uint64_t consumer_input_mask = 0;
+   uint64_t consumer_patch_input_mask = 0;
+
+   nir_foreach_variable(variable, &consumer->inputs) {
+      uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage);
+      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
+
+      if (variable->data.patch)
+         consumer_patch_input_mask |= mask << loc;
+      else
+         consumer_input_mask |= mask << loc;
+   }
+
+   /* The union of both stages' slots — a slot used by either side
+    * reserves space on both sides.
+    */
+   uint64_t io_mask = producer_output_mask | consumer_input_mask;
+   uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask;
+
+   /* driver_location = 4 * (occupied slots strictly below this slot);
+    * the * 4 converts a slot index into a component (vec4-based) index.
+    */
+   nir_foreach_variable(variable, &producer->outputs) {
+      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
+
+      if (variable->data.patch)
+         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
+      else
+         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
+   }
+
+   nir_foreach_variable(variable, &consumer->inputs) {
+      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
+
+      if (variable->data.patch)
+         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
+      else
+         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
+   }
+
+   nir_linked_io_var_info result = {
+      .num_linked_io_vars = util_bitcount64(io_mask),
+      .num_linked_patch_io_vars = util_bitcount64(patch_io_mask),
+   };
+
+   return result;
+}