We've now hit every case other than geometry shaders (and compute
shaders, where the pass is a no-op).  So, let's move geometry shaders
over too and be done with it.

The only advantage of doing this at link time was saving the expense of
re-running the pass on recompiles.  But we already run a lot of passes
at compile time, and the extra code complexity isn't worth it.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason.ekstrand@intel.com>
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
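
For reference, here's a minimal sketch (not part of the patch; it just
restates the hunks below with unrelated setup omitted, and the enclosing
brw_compile_gs() name is inferred from context) of the compile-time flow
that geometry shaders now share with the other stages:

    /* Per-compile NIR processing, brw_compile_gs()-style.  The IO
     * lowering now runs on the cloned shader here, so it is simply
     * re-run on recompiles rather than done once in brw_create_nir()
     * at link time. */
    nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
    shader = brw_nir_apply_sampler_key(shader, compiler->devinfo,
                                       &key->tex, is_scalar);
    shader = brw_nir_lower_io(shader, compiler->devinfo, is_scalar,
                              false, NULL);
    shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);
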
                bool is_scalar)
 {
    struct gl_context *ctx = &brw->ctx;
-   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
    const nir_shader_compiler_options *options =
       ctx->Const.ShaderCompilerOptions[stage].NirOptions;
    bool progress;
       OPT_V(nir_lower_atomics, shader_prog);
    }

-   if (nir->stage != MESA_SHADER_VERTEX &&
-       nir->stage != MESA_SHADER_TESS_CTRL &&
-       nir->stage != MESA_SHADER_TESS_EVAL &&
-       nir->stage != MESA_SHADER_FRAGMENT) {
-      nir = brw_nir_lower_io(nir, devinfo, is_scalar, false, NULL);
-   }
-
    return nir;
 }

    nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
    shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
                                       is_scalar);
+   shader = brw_nir_lower_io(shader, compiler->devinfo, is_scalar, false, NULL);
    shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);

    prog_data->include_primitive_id =