intel/nir: Use the OPT macro for more passes
author Jason Ekstrand <jason.ekstrand@intel.com>
Fri, 19 Oct 2018 17:06:36 +0000 (12:06 -0500)
committer Jason Ekstrand <jason.ekstrand@intel.com>
Fri, 26 Oct 2018 16:45:29 +0000 (11:45 -0500)
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
src/intel/compiler/brw_nir.c

index 1cd568615789095da709373306d5f8a2b5d0a91f..cf5a4a96d678ff46f7603ac725d20cfe6978d85b 100644
@@ -674,7 +674,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
    /* Lower int64 instructions before nir_optimize so that loop unrolling
     * sees their actual cost.
     */
-   nir_lower_int64(nir, nir_lower_imul64 |
+   OPT(nir_lower_int64, nir_lower_imul64 |
                         nir_lower_isign64 |
                         nir_lower_divmod64);
 
@@ -687,7 +687,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
       OPT(nir_opt_large_constants, NULL, 32);
    }
 
-   nir_lower_bit_size(nir, lower_bit_size_callback, NULL);
+   OPT(nir_lower_bit_size, lower_bit_size_callback, NULL);
 
    if (is_scalar) {
       OPT(nir_lower_load_const_to_scalar);
@@ -712,7 +712,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 
    nir_variable_mode indirect_mask =
       brw_nir_no_indirect_mask(compiler, nir->info.stage);
-   nir_lower_indirect_derefs(nir, indirect_mask);
+   OPT(nir_lower_indirect_derefs, indirect_mask);
 
    /* Get rid of split copies */
    nir = brw_nir_optimize(nir, compiler, is_scalar, false);
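
For context, OPT is a convenience macro local to brw_nir.c that wraps NIR_PASS so that a pass's boolean return value feeds the enclosing function's shared progress bookkeeping instead of being discarded. The sketch below shows the general shape of such a macro; it assumes a local `nir` shader pointer and a `progress` flag in the enclosing scope, and the actual definition in brw_nir.c may add validation or debug hooks on top, so treat this as an illustration rather than the file's verbatim source.

    /* Sketch of an OPT-style wrapper, assuming `nir` and `progress` exist
     * in the enclosing function. NIR_PASS() comes from nir.h; the real
     * brw_nir.c macro may layer extra debug behavior on top of this.
     */
    #define OPT(pass, ...) ({                                \
       bool this_progress = false;                           \
       NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);    \
       if (this_progress)                                    \
          progress = true;                                   \
       this_progress;                                        \
    })

With that shape in mind, converting a bare call such as nir_lower_int64(nir, ...) into OPT(nir_lower_int64, ...) means these lowering passes report progress through the same path as every other pass in brw_preprocess_nir, rather than having their return values silently dropped.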