/* before buffers and vars_to_ssa */
NIR_PASS_V(nir, gl_nir_lower_bindless_images);
- st_nir_opts(nir);
/* TODO: Change GLSL to not lower shared memory. */
if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
    ... /* rest of this block elided in the excerpt */

NIR_PASS_V(nir, nir_opt_constant_folding);
if (lower_64bit) {
+ /* Clean up the IR before 64-bit lowering. */
+ st_nir_opts(nir);
+
bool lowered_64bit_ops = false;
if (options->lower_doubles_options) {
NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
         ... /* remaining arguments elided in the excerpt */);
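
/*
 * For reference (illustrative sketch, not part of this patch): st_nir_opts()
 * is essentially a run-until-no-progress loop over the common NIR cleanup
 * passes, which is why the patch is careful about where it gets called. The
 * function name below is made up for illustration and the pass list is
 * shortened; the real st_nir_opts() runs additional passes (copy propagation
 * on variables, if/loop optimizations, etc.).
 */
static void
example_nir_opts(nir_shader *nir)
{
   bool progress;
   do {
      progress = false;

      /* Each NIR_PASS sets progress to true if the pass changed the shader. */
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
   } while (progress);
}
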
bool st_link_nir(struct gl_context *ctx, struct gl_shader_program *shader_program)
{
struct st_context *st = st_context(ctx);
struct pipe_screen *screen = st->pipe->screen;
+ unsigned num_linked_shaders = 0;
unsigned last_stage = 0;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
if (shader == NULL)
continue;
+ num_linked_shaders++;
+
const nir_shader_compiler_options *options =
st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
struct gl_program *prog = shader->Program;
nir_shader *nir = shader->Program->nir;
+ /* Linked shaders are optimized in st_nir_link_shaders. Separate shaders
+ * and shaders with a fixed-func VS or FS are optimized here.
+ */
+ if (num_linked_shaders == 1)
+ st_nir_opts(nir);
+
NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
st->pipe->screen);
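
/*
 * Context for the num_linked_shaders check above (illustrative note, not part
 * of this patch): a program that links a full pipeline (e.g. VS + FS) has its
 * per-stage NIR optimized while the stages are linked together in
 * st_nir_link_shaders(), so running st_nir_opts() again in this loop would be
 * redundant. Only programs that end up with exactly one linked stage --
 * separate shader objects (e.g. from glCreateShaderProgramv()) or programs
 * paired with a fixed-function VS or FS -- are optimized here:
 *
 *   num_linked_shaders == 1  ->  st_nir_opts(nir) in this loop
 *   num_linked_shaders  > 1  ->  cross-stage optimization in
 *                                st_nir_link_shaders()
 */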