assert(var->data.location >= 0);
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
return ((1ull << slots) - 1) << location;
}
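+/* Returns the number of components to iterate over when building the
+ * per-component masks. For structs and interface blocks we don't know
+ * which components of each slot are used, so conservatively assume all
+ * four are.
+ */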
+static uint8_t
+get_num_components(nir_variable *var)
+{
+ if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
+ return 4;
+
+ return glsl_get_vector_elements(glsl_without_array(var->type));
+}
+
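+/* TCS outputs can be read back by other invocations working on the same
+ * patch, so reads of output derefs inside the TCS itself must be recorded
+ * in addition to the reads of the next stage.
+ */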
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
continue;
nir_variable *var = nir_deref_instr_get_variable(deref);
- if (var->data.patch) {
- patches_read[var->data.location_frac] |=
- get_variable_io_mask(var, shader->info.stage);
- } else {
- read[var->data.location_frac] |=
- get_variable_io_mask(var, shader->info.stage);
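+ /* The read masks are tracked per component (hence the [4] arrays), so
+ * set the slot mask for every component the variable covers.
+ */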
+ for (unsigned i = 0; i < get_num_components(var); i++) {
+ if (var->data.patch) {
+ patches_read[var->data.location_frac + i] |=
+ get_variable_io_mask(var, shader->info.stage);
+ } else {
+ read[var->data.location_frac + i] |=
+ get_variable_io_mask(var, shader->info.stage);
+ }
}
}
}
uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };
nir_foreach_variable(var, &producer->outputs) {
- if (var->data.patch) {
- patches_written[var->data.location_frac] |=
- get_variable_io_mask(var, producer->info.stage);
- } else {
- written[var->data.location_frac] |=
- get_variable_io_mask(var, producer->info.stage);
+ for (unsigned i = 0; i < get_num_components(var); i++) {
+ if (var->data.patch) {
+ patches_written[var->data.location_frac + i] |=
+ get_variable_io_mask(var, producer->info.stage);
+ } else {
+ written[var->data.location_frac + i] |=
+ get_variable_io_mask(var, producer->info.stage);
+ }
}
}
nir_foreach_variable(var, &consumer->inputs) {
- if (var->data.patch) {
- patches_read[var->data.location_frac] |=
- get_variable_io_mask(var, consumer->info.stage);
- } else {
- read[var->data.location_frac] |=
- get_variable_io_mask(var, consumer->info.stage);
+ for (unsigned i = 0; i < get_num_components(var); i++) {
+ if (var->data.patch) {
+ patches_read[var->data.location_frac + i] |=
+ get_variable_io_mask(var, consumer->info.stage);
+ } else {
+ read[var->data.location_frac + i] |=
+ get_variable_io_mask(var, consumer->info.stage);
+ }
}
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
comps[location + i].interp_type =
get_interp_type(var, type, default_to_smooth_interp);
comps[location + i].interp_loc = get_interp_loc(var);
- comps[location + i].is_32bit = glsl_type_is_32bit(type);
+ comps[location + i].is_32bit =
+ glsl_type_is_32bit(glsl_without_array(type));
}
}
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
uint8_t interp_loc;
bool is_32bit;
bool is_patch;
+ bool is_intra_stage_only;
bool initialised;
};
if (comp1->is_patch != comp2->is_patch)
return comp1->is_patch ? 1 : -1;
+ /* We want to try to group together TCS outputs that are only read by other
+ * TCS invocations and not consumed by the following stage.
+ */
+ if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
+ return comp1->is_intra_stage_only ? 1 : -1;
+
/* We can only pack varyings with matching interpolation types so group
* them together.
*/
}
static void
-gather_varying_component_info(nir_shader *consumer,
+gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
struct varying_component **varying_comp_info,
unsigned *varying_comp_info_size,
bool default_to_smooth_interp)
{
- unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {0};
+ unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
unsigned num_of_comps_to_pack = 0;
/* Count the number of varyings that can be packed and create a mapping
* of those varyings to the array we will pass to qsort.
*/
- nir_foreach_variable(var, &consumer->inputs) {
+ nir_foreach_variable(var, &producer->outputs) {
/* Only remap things that aren't builtins. */
if (var->data.location >= VARYING_SLOT_VAR0 &&
continue;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, consumer->info.stage)) {
+ if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (intr->intrinsic != nir_intrinsic_load_deref &&
intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
- intr->intrinsic != nir_intrinsic_interp_deref_at_offset)
+ intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
+ intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
continue;
nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
if (!vc_info->initialised) {
const struct glsl_type *type = in_var->type;
- if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
+ if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
+ in_var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
vc_info->interp_loc = get_interp_loc(in_var);
vc_info->is_32bit = glsl_type_is_32bit(type);
vc_info->is_patch = in_var->data.patch;
+ vc_info->is_intra_stage_only = false;
+ vc_info->initialised = true;
+ }
+ }
+ }
+
+ /* Walk over the shader and populate the varying component info array
+ * for varyings which are read by other TCS invocations but are not consumed
+ * by the TES.
+ */
+ if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
+ impl = nir_shader_get_entrypoint(producer);
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ if (intr->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
+ if (deref->mode != nir_var_shader_out)
+ continue;
+
+ /* We only remap things that aren't builtins. */
+ nir_variable *out_var = nir_deref_instr_get_variable(deref);
+ if (out_var->data.location < VARYING_SLOT_VAR0)
+ continue;
+
+ unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
+ if (location >= MAX_VARYINGS_INCL_PATCH)
+ continue;
+
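+ /* Entries in store_varying_info_idx are stored offset by one so that
+ * zero can mean "no entry", hence the -1 when indexing below.
+ */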
+ unsigned var_info_idx =
+ store_varying_info_idx[location][out_var->data.location_frac];
+ if (!var_info_idx) {
+ /* Something went wrong, the shader interfaces didn't match, so
+ * abandon packing. This can happen for example when the
+ * inputs are scalars but the outputs are struct members.
+ */
+ *varying_comp_info_size = 0;
+ break;
+ }
+
+ struct varying_component *vc_info =
+ &(*varying_comp_info)[var_info_idx-1];
+
+ if (!vc_info->initialised) {
+ const struct glsl_type *type = out_var->type;
+ if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ vc_info->var = out_var;
+ vc_info->interp_type =
+ get_interp_type(out_var, type, default_to_smooth_interp);
+ vc_info->interp_loc = get_interp_loc(out_var);
+ vc_info->is_32bit = glsl_type_is_32bit(type);
+ vc_info->is_patch = out_var->data.patch;
+ vc_info->is_intra_stage_only = true;
+ vc_info->initialised = true;
+ }
}
}
}
+
+ for (unsigned i = 0; i < *varying_comp_info_size; i++) {
+ struct varying_component *vc_info = &(*varying_comp_info)[i];
+ if (!vc_info->initialised) {
+ /* Something went wrong, the shader interfaces didn't match, so
+ * abandon packing. This can happen for example when the outputs are
+ * scalars but the inputs are struct members.
+ */
+ *varying_comp_info_size = 0;
+ break;
+ }
+ }
}
static void
unsigned varying_comp_info_size;
/* Gather varying component info */
- gather_varying_component_info(consumer, &varying_comp_info,
+ gather_varying_component_info(producer, consumer, &varying_comp_info,
&varying_comp_info_size,
default_to_smooth_interp);
assert(producer->info.stage != MESA_SHADER_FRAGMENT);
assert(consumer->info.stage != MESA_SHADER_VERTEX);
- struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {0};
+ struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};
get_unmoveable_components_masks(&producer->outputs, assigned_comps,
producer->info.stage,
return progress;
}
+
+/* TODO any better helper somewhere to sort a list? */
+
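+/* Insertion sort keyed on data.location. Inserting before the first
+ * strictly greater location keeps variables that share a location in
+ * their original relative order, and produces the ascending ordering
+ * that the slot assignment below relies on.
+ */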
+static void
+insert_sorted(struct exec_list *var_list, nir_variable *new_var)
+{
+ nir_foreach_variable(var, var_list) {
+ if (var->data.location > new_var->data.location) {
+ exec_node_insert_node_before(&var->node, &new_var->node);
+ return;
+ }
+ }
+ exec_list_push_tail(var_list, &new_var->node);
+}
+
+static void
+sort_varyings(struct exec_list *var_list)
+{
+ struct exec_list new_list;
+ exec_list_make_empty(&new_list);
+ nir_foreach_variable_safe(var, var_list) {
+ exec_node_remove(&var->node);
+ insert_sorted(&new_list, var);
+ }
+ exec_list_move_nodes_to(&new_list, var_list);
+}
+
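+/* Assigns consecutive driver_location slots to every variable in the
+ * list, letting component-packed varyings share a slot, and returns the
+ * total number of slots used in *size.
+ */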
+void
+nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
+ gl_shader_stage stage)
+{
+ unsigned location = 0;
+ unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
+ uint64_t processed_locs[2] = {0};
+
+ sort_varyings(var_list);
+
+ int UNUSED last_loc = 0;
+ bool last_partial = false;
+ nir_foreach_variable(var, var_list) {
+ const struct glsl_type *type = var->type;
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ int base;
+ if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
+ base = VERT_ATTRIB_GENERIC0;
+ else if (var->data.mode == nir_var_shader_out &&
+ stage == MESA_SHADER_FRAGMENT)
+ base = FRAG_RESULT_DATA0;
+ else
+ base = VARYING_SLOT_VAR0;
+
+ unsigned var_size;
+ if (var->data.compact) {
+ /* If we are inside a partial compact,
+ * don't allow another compact to be in this slot
+ * if it starts at component 0.
+ */
+ if (last_partial && var->data.location_frac == 0) {
+ location++;
+ }
+
+ /* compact variables must be arrays of scalars */
+ assert(glsl_type_is_array(type));
+ assert(glsl_type_is_scalar(glsl_get_array_element(type)));
+ unsigned start = 4 * location + var->data.location_frac;
+ unsigned end = start + glsl_get_length(type);
+ var_size = end / 4 - location;
+ last_partial = end % 4 != 0;
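+
+ /* A sketch of the arithmetic above, taking location 0: a compact
+ * float[8] with location_frac 0 gives start = 0, end = 8, var_size = 2
+ * and last_partial = false, while a float[5] with location_frac 1
+ * gives start = 1, end = 6, var_size = 1 and last_partial = true,
+ * leaving the trailing slot to the last_partial handling below.
+ */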
+ } else {
+ /* Compact variables bypass the normal varying compacting pass,
+ * which means they cannot be in the same vec4 slot as a normal
+ * variable. If part of the current slot is taken up by a compact
+ * variable, we need to go to the next one.
+ */
+ if (last_partial) {
+ location++;
+ last_partial = false;
+ }
+ var_size = glsl_count_attribute_slots(type, false);
+ }
+
+ /* Builtins don't allow component packing so we only need to worry about
+ * user defined varyings sharing the same location.
+ */
+ bool processed = false;
+ if (var->data.location >= base) {
+ unsigned glsl_location = var->data.location - base;
+
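+ /* data.index is presumably non-zero only for dual-source blend
+ * fragment outputs here, which is why two processed_locs masks are
+ * enough.
+ */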
+ for (unsigned i = 0; i < var_size; i++) {
+ if (processed_locs[var->data.index] &
+ ((uint64_t)1 << (glsl_location + i)))
+ processed = true;
+ else
+ processed_locs[var->data.index] |=
+ ((uint64_t)1 << (glsl_location + i));
+ }
+ }
+
+ /* Because component packing allows varyings to share the same location
+ * we may already have processed this location.
+ */
+ if (processed) {
+ unsigned driver_location = assigned_locations[var->data.location];
+ var->data.driver_location = driver_location;
+
+ /* An array may be packed such that it crosses multiple other arrays
+ * or variables; we need to make sure we have allocated the elements
+ * consecutively if the previously processed var was shorter than
+ * the current array we are processing.
+ *
+ * NOTE: The code below assumes the var list is ordered in ascending
+ * location order.
+ */
+ assert(last_loc <= var->data.location);
+ last_loc = var->data.location;
+ unsigned last_slot_location = driver_location + var_size;
+ if (last_slot_location > location) {
+ unsigned num_unallocated_slots = last_slot_location - location;
+ unsigned first_unallocated_slot = var_size - num_unallocated_slots;
+ for (unsigned i = first_unallocated_slot; i < var_size; i++) {
+ assigned_locations[var->data.location + i] = location;
+ location++;
+ }
+ }
+ continue;
+ }
+
+ for (unsigned i = 0; i < var_size; i++) {
+ assigned_locations[var->data.location + i] = location + i;
+ }
+
+ var->data.driver_location = location;
+ location += var_size;
+ }
+
+ if (last_partial)
+ location++;
+
+ *size = location;
+}
+
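+/* A minimal usage sketch (assuming a shader "nir" with the usual
+ * nir_shader fields of this era; drivers typically store the result in
+ * shader info or their own state):
+ *
+ *    unsigned num_inputs;
+ *    nir_assign_io_var_locations(&nir->inputs, &num_inputs,
+ *                                nir->info.stage);
+ */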