nir_lower_io_arrays_to_elements(ordered_shaders[i],
ordered_shaders[i - 1]);
- if (nir_link_constant_varyings(ordered_shaders[i],
- ordered_shaders[i - 1]))
+ if (nir_link_opt_varyings(ordered_shaders[i],
+ ordered_shaders[i - 1]))
radv_optimize_nir(ordered_shaders[i - 1], false, false);
nir_remove_dead_variables(ordered_shaders[i],

void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
-bool nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be

}
bool
-nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer)
+nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
/* TODO: Add support for more shader stage combinations */
if (consumer->info.stage != MESA_SHADER_FRAGMENT ||

*consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
}
- if (nir_link_constant_varyings(*producer, *consumer))
+ if (nir_link_opt_varyings(*producer, *consumer))
*consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);

{
nir_lower_io_arrays_to_elements(*producer, *consumer);
- if (nir_link_constant_varyings(*producer, *consumer))
+ if (nir_link_opt_varyings(*producer, *consumer))
st_nir_opts(*consumer, scalar);
NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
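
All three callers (radv, brw, st/nir) follow the same pattern after the rename: run the
varying-linking optimization on a producer/consumer pair and, only if it reports progress,
re-run the consumer's optimization loop before stripping dead producer outputs. Below is a
minimal sketch of that caller pattern; my_optimize_nir() and link_shader_pair() are
hypothetical stand-ins for the driver-specific helpers above (radv_optimize_nir(),
brw_nir_optimize(), st_nir_opts()), while the NIR calls are the ones shown in the diff.

#include "nir.h"

/* Hypothetical driver-specific optimization loop; stands in for
 * radv_optimize_nir() / brw_nir_optimize() / st_nir_opts() above. */
static void my_optimize_nir(nir_shader *shader);

static void
link_shader_pair(nir_shader *producer, nir_shader *consumer)
{
   /* nir_link_opt_varyings() returns true when it made progress on the
    * consumer, which is the callers' signal to re-optimize the consumer. */
   if (nir_link_opt_varyings(producer, consumer))
      my_optimize_nir(consumer);

   /* Producer outputs whose consumer loads were optimized away
    * can now be removed. */
   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
}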