intel/compiler: Move int64/doubles lowering options
author    Jordan Justen <jordan.l.justen@intel.com>
          Tue, 26 Feb 2019 01:17:29 +0000 (17:17 -0800)
committer Jordan Justen <jordan.l.justen@intel.com>
          Sat, 2 Mar 2019 22:33:44 +0000 (14:33 -0800)
Instead of calculating the int64 and doubles lowering options each
time a shader is preprocessed, compute them once when the compiler is
created and store them in nir_shader_compiler_options, where
brw_preprocess_nir can simply read them back.
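
The change itself is a straightforward hoist: the lowering masks depend
only on the device, not on the shader, so they can be computed once at
compiler creation. A minimal standalone sketch of the pattern, using
simplified stand-in types and made-up mask values rather than the real
NIR/brw structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for nir_lower_int64_options and friends; the real NIR
     * enums and brw_compiler structs are assumed away for this sketch. */
    typedef unsigned lower_int64_mask;
    typedef unsigned lower_doubles_mask;

    struct shader_compiler_options {
       lower_int64_mask   lower_int64_options;
       lower_doubles_mask lower_doubles_options;
    };

    struct compiler {
       struct shader_compiler_options nir_options;
    };

    /* Runs once, at compiler creation: derive the lowering masks from
     * the device capabilities and cache them in the options struct. */
    static void
    compiler_create(struct compiler *c, bool has_64bit_types)
    {
       c->nir_options.lower_int64_options   = 0x1; /* always-lowered int64 ops */
       c->nir_options.lower_doubles_options = 0x2; /* always-lowered fp64 ops  */
       if (!has_64bit_types) {
          c->nir_options.lower_int64_options   |= 0x4; /* lower everything   */
          c->nir_options.lower_doubles_options |= 0x8; /* full software fp64 */
       }
    }

    /* Runs per shader: read the precomputed masks instead of rebuilding
     * them on every preprocess call. */
    static void
    preprocess_shader(const struct compiler *c)
    {
       printf("int64 mask %#x, doubles mask %#x\n",
              c->nir_options.lower_int64_options,
              c->nir_options.lower_doubles_options);
    }

    int
    main(void)
    {
       struct compiler c;
       compiler_create(&c, false /* has_64bit_types */);
       preprocess_shader(&c); /* many shaders share one mask computation */
       return 0;
    }

In the actual patch below, the per-stage options struct is rzalloc'ed
from the base scalar/vector options, so the cached masks live alongside
the rest of nir_shader_compiler_options.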

Signed-off-by: Jordan Justen <jordan.l.justen@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
src/intel/compiler/brw_compiler.c
src/intel/compiler/brw_nir.c

diff --git a/src/intel/compiler/brw_compiler.c b/src/intel/compiler/brw_compiler.c
index 35ab31df39a4a6388634c608b00cec3b1345ad23..b3df0d9fa23bd1060be66bd8bf0885cabab15d1b 100644
@@ -143,6 +143,34 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
       compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
    }
 
+   nir_lower_int64_options int64_options =
+      nir_lower_imul64 |
+      nir_lower_isign64 |
+      nir_lower_divmod64 |
+      nir_lower_imul_high64;
+   nir_lower_doubles_options fp64_options =
+      nir_lower_drcp |
+      nir_lower_dsqrt |
+      nir_lower_drsq |
+      nir_lower_dtrunc |
+      nir_lower_dfloor |
+      nir_lower_dceil |
+      nir_lower_dfract |
+      nir_lower_dround_even |
+      nir_lower_dmod;
+
+   if (!devinfo->has_64bit_types) {
+      int64_options |= nir_lower_mov64 |
+                       nir_lower_icmp64 |
+                       nir_lower_iadd64 |
+                       nir_lower_iabs64 |
+                       nir_lower_ineg64 |
+                       nir_lower_logic64 |
+                       nir_lower_minmax64 |
+                       nir_lower_shift64;
+      fp64_options |= nir_lower_fp64_full_software;
+   }
+
    /* We want the GLSL compiler to emit code that uses condition codes */
    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
       compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
@@ -158,13 +186,18 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
       compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
       compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;
 
+      struct nir_shader_compiler_options *nir_options =
+         rzalloc(compiler, struct nir_shader_compiler_options);
       if (is_scalar) {
-         compiler->glsl_compiler_options[i].NirOptions =
-            devinfo->gen < 11 ? &scalar_nir_options : &scalar_nir_options_gen11;
+         *nir_options =
+            devinfo->gen < 11 ? scalar_nir_options : scalar_nir_options_gen11;
       } else {
-         compiler->glsl_compiler_options[i].NirOptions =
-            devinfo->gen < 6 ? &vector_nir_options : &vector_nir_options_gen6;
+         *nir_options =
+            devinfo->gen < 6 ? vector_nir_options : vector_nir_options_gen6;
       }
+      nir_options->lower_int64_options = int64_options;
+      nir_options->lower_doubles_options = fp64_options;
+      compiler->glsl_compiler_options[i].NirOptions = nir_options;
 
       compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
       compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 00e3879b4dc67ba43dcc8f89773cb568bdb0751a..786f1298f22637eb10b8e185d32063acaf391fc9 100644
@@ -672,40 +672,12 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
    /* Lower 64-bit operations before nir_optimize so that loop unrolling sees
     * their actual cost.
     */
-   nir_lower_int64_options int64_options =
-      nir_lower_imul64 |
-      nir_lower_isign64 |
-      nir_lower_divmod64 |
-      nir_lower_imul_high64;
-   nir_lower_doubles_options fp64_options =
-      nir_lower_drcp |
-      nir_lower_dsqrt |
-      nir_lower_drsq |
-      nir_lower_dtrunc |
-      nir_lower_dfloor |
-      nir_lower_dceil |
-      nir_lower_dfract |
-      nir_lower_dround_even |
-      nir_lower_dmod;
-
-   if (!devinfo->has_64bit_types) {
-      int64_options |= nir_lower_mov64 |
-                       nir_lower_icmp64 |
-                       nir_lower_iadd64 |
-                       nir_lower_iabs64 |
-                       nir_lower_ineg64 |
-                       nir_lower_logic64 |
-                       nir_lower_minmax64 |
-                       nir_lower_shift64;
-      fp64_options |= nir_lower_fp64_full_software;
-   }
-
    bool lowered_64bit_ops = false;
    do {
       progress = false;
 
-      OPT(nir_lower_int64, int64_options);
-      OPT(nir_lower_doubles, fp64_options);
+      OPT(nir_lower_int64, nir->options->lower_int64_options);
+      OPT(nir_lower_doubles, nir->options->lower_doubles_options);
 
       /* Necessary to lower add -> sub and div -> mul/rcp */
       OPT(nir_opt_algebraic);