}
}
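+/* Everything remap_vs_attrs() needs, passed through its single void * closure. */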
+struct remap_vs_attrs_params {
+ shader_info *nir_info;
+ bool is_scalar;
+};
+
static bool
-remap_vs_attrs(nir_block *block, shader_info *nir_info)
+remap_vs_attrs(nir_block *block, void *closure)
{
+ struct remap_vs_attrs_params *params =
+ (struct remap_vs_attrs_params *) closure;
+ shader_info *nir_info = params->nir_info;
+ bool is_scalar = params->is_scalar;
+
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic == nir_intrinsic_load_input) {
/* Enabled attributes are packed contiguously, so the slot index is the
 * number of enabled attributes below this one.
 */
int attr = intrin->const_index[0];
int slot = _mesa_bitcount_64(nir_info->inputs_read &
BITFIELD64_MASK(attr));
- intrin->const_index[0] = 4 * slot;
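+ /* Scalar mode offsets inputs in components (four per slot); vec4 mode addresses whole vec4 slots. */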
+ intrin->const_index[0] = is_scalar ? 4 * slot : slot;
}
}
return true;
bool use_legacy_snorm_formula,
const uint8_t *vs_attrib_wa_flags)
{
+ struct remap_vs_attrs_params params = {
+ .nir_info = nir->info,
+ .is_scalar = is_scalar
+ };
+
/* Start with the location of the variable's base. */
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
}
brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
vs_attrib_wa_flags);
- if (is_scalar) {
- /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
-
- nir_foreach_function(function, nir) {
- if (function->impl) {
- nir_foreach_block(block, function->impl) {
- remap_vs_attrs(block, nir->info);
- }
+ /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ nir_foreach_block(block, function->impl) {
+ remap_vs_attrs(block, &params);
}
}
}