    brw_nir_optimize(nir, compiler, is_scalar, true);
-   bool lowered_64bit_ops = false;
-   do {
-      progress = false;
-
-      OPT(nir_lower_int64, nir->options->lower_int64_options);
-      OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
-
-      /* Necessary to lower add -> sub and div -> mul/rcp */
-      OPT(nir_opt_algebraic);
-
-      lowered_64bit_ops |= progress;
-   } while (progress);
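+   /* Lower doubles before int64: nir_lower_doubles can introduce 64-bit
+    * integer ops (softfp64 operates on the integer representation of
+    * doubles), which the nir_lower_int64 call below then lowers.
+    */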
+   OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+   OPT(nir_lower_int64, nir->options->lower_int64_options);
    /* This needs to be run after the first optimization pass but before we
     * lower indirect derefs away
     */

    if (lower_64bit) {
       bool lowered_64bit_ops = false;
-      bool progress = false;
-
-      NIR_PASS_V(nir, nir_opt_algebraic);
-
-      do {
-         progress = false;
-         if (options->lower_int64_options) {
-            NIR_PASS(progress, nir, nir_lower_int64,
-                     options->lower_int64_options);
-         }
-         if (options->lower_doubles_options) {
-            NIR_PASS(progress, nir, nir_lower_doubles,
-                     st->ctx->SoftFP64, options->lower_doubles_options);
-         }
-         NIR_PASS(progress, nir, nir_opt_algebraic);
-         lowered_64bit_ops |= progress;
-      } while (progress);
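+      /* A single run of each lowering pass is now sufficient; NIR_PASS
+       * sets lowered_64bit_ops whenever a pass makes progress, so the
+       * st_nir_opts() call below only runs when something was lowered.
+       */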
+      if (options->lower_doubles_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
+                  st->ctx->SoftFP64, options->lower_doubles_options);
+      }
+      if (options->lower_int64_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
+                  options->lower_int64_options);
+      }
       if (lowered_64bit_ops)
          st_nir_opts(nir, is_scalar);