nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- nir_shader *softfp64 = NULL;
- if ((options->lower_doubles_options & nir_lower_fp64_full_software) &&
- nir->info.uses_64bit) {
- softfp64 = glsl_float64_funcs_to_nir(ctx, options);
- ralloc_steal(ralloc_parent(nir), softfp64);
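+ /* Build the softfp64 function library at most once per context and cache
+  * it, instead of rebuilding it for every shader that needs fp64 lowering.
+  */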
+ if (!ctx->SoftFP64 && nir->info.uses_64bit &&
+ (options->lower_doubles_options & nir_lower_fp64_full_software)) {
+ ctx->SoftFP64 = glsl_float64_funcs_to_nir(ctx, options);
}
- brw_preprocess_nir(brw->screen->compiler, nir, softfp64);
+ brw_preprocess_nir(brw->screen->compiler, nir, ctx->SoftFP64);
if (stage == MESA_SHADER_TESS_CTRL) {
/* Lower gl_PatchVerticesIn from a system value to a uniform on Gen8+. */
if (destroy_compiler_types)
_mesa_destroy_shader_compiler_types();
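+ /* Free the cached softfp64 shader; ralloc_free(NULL) is a no-op if it
+  * was never built.
+  */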
+ ralloc_free(ctx->SoftFP64);
+
/* unbind the context if it's currently bound */
if (ctx == _mesa_get_current_context()) {
_mesa_make_current(NULL, NULL, NULL);
*/
struct gl_pipeline_object *_Shader;
+ /**
+ * NIR containing the functions that implement software fp64 support.
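+ * Created on demand and freed along with the context.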
+ */
+ struct nir_shader *SoftFP64;
+
struct gl_query_state Query; /**< occlusion, timer queries */
struct gl_transform_feedback_state TransformFeedback;
}
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- nir_shader *softfp64 = NULL;
- if (nir->info.uses_64bit &&
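+ /* Reuse the per-context softfp64 library instead of rebuilding it for
+  * each shader.
+  */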
+ if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
(options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
- softfp64 = glsl_float64_funcs_to_nir(st->ctx, options);
- ralloc_steal(ralloc_parent(nir), softfp64);
+ st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
}
nir_variable_mode mask =
}
if (options->lower_doubles_options) {
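+ /* nir_lower_doubles calls into the cached softfp64 functions when full
+  * software lowering is requested.
+  */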
NIR_PASS(progress, nir, nir_lower_doubles,
- softfp64, options->lower_doubles_options);
+ st->ctx->SoftFP64, options->lower_doubles_options);
}
NIR_PASS(progress, nir, nir_opt_algebraic);
lowered_64bit_ops |= progress;