nir: Replace the scoped_memory barrier by a scoped_barrier
[mesa.git] / src/intel/compiler/brw_compiler.c
index e515559acb6a7b4e613604840be4be73da290610..c26b6eb3bbca0a5e86f103cba9d8e31571553fcb 100644
@@ -24,7 +24,7 @@
 #include "brw_compiler.h"
 #include "brw_shader.h"
 #include "brw_eu.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
 #include "compiler/nir/nir.h"
 #include "main/errors.h"
 #include "util/debug.h"
    .lower_sub = true,                                                         \
    .lower_fdiv = true,                                                        \
    .lower_scmp = true,                                                        \
-   .lower_fmod32 = true,                                                      \
-   .lower_fmod64 = false,                                                     \
+   .lower_flrp16 = true,                                                      \
+   .lower_fmod = true,                                                        \
    .lower_bitfield_extract = true,                                            \
    .lower_bitfield_insert = true,                                             \
    .lower_uadd_carry = true,                                                  \
    .lower_usub_borrow = true,                                                 \
    .lower_fdiv = true,                                                        \
    .lower_flrp64 = true,                                                      \
-   .native_integers = true,                                                   \
+   .lower_isign = true,                                                       \
+   .lower_ldexp = true,                                                       \
+   .lower_device_index_to_zero = true,                                        \
+   .vectorize_io = true,                                                      \
    .use_interpolated_input_intrinsics = true,                                 \
-   .vertex_id_zero_based = true
+   .vertex_id_zero_based = true,                                              \
+   .lower_base_vertex = true,                                                 \
+   .use_scoped_barrier = true,                                                \
+   .support_8bit_alu = true,                                                  \
+   .support_16bit_alu = true
+
+#define COMMON_SCALAR_OPTIONS                                                 \
+   .lower_to_scalar = true,                                                   \
+   .lower_pack_half_2x16 = true,                                              \
+   .lower_pack_snorm_2x16 = true,                                             \
+   .lower_pack_snorm_4x8 = true,                                              \
+   .lower_pack_unorm_2x16 = true,                                             \
+   .lower_pack_unorm_4x8 = true,                                              \
+   .lower_unpack_half_2x16 = true,                                            \
+   .lower_unpack_snorm_2x16 = true,                                           \
+   .lower_unpack_snorm_4x8 = true,                                            \
+   .lower_unpack_unorm_2x16 = true,                                           \
+   .lower_unpack_unorm_4x8 = true,                                            \
+   .lower_usub_sat64 = true,                                                  \
+   .lower_hadd64 = true,                                                      \
+   .lower_bfe_with_two_constants = true,                                      \
+   .max_unroll_iterations = 32
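
Many of the flags above just ask NIR to open-code small packing opcodes. As a reminder of what one of them computes, a rough C equivalent of pack_unorm_4x8 (clamp each component to [0,1], scale to 0..255, pack one byte per component) is sketched below; this is illustrative only, not the actual NIR lowering:

   #include <math.h>
   #include <stdint.h>

   static uint32_t
   pack_unorm_4x8_ref(const float v[4])
   {
      uint32_t packed = 0;
      for (int i = 0; i < 4; i++) {
         float c = fminf(fmaxf(v[i], 0.0f), 1.0f);            /* clamp to [0, 1] */
         packed |= (uint32_t)(c * 255.0f + 0.5f) << (i * 8);  /* byte i of the result */
      }
      return packed;
   }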
 
 static const struct nir_shader_compiler_options scalar_nir_options = {
    COMMON_OPTIONS,
-   .lower_pack_half_2x16 = true,
-   .lower_pack_snorm_2x16 = true,
-   .lower_pack_snorm_4x8 = true,
-   .lower_pack_unorm_2x16 = true,
-   .lower_pack_unorm_4x8 = true,
-   .lower_unpack_half_2x16 = true,
-   .lower_unpack_snorm_2x16 = true,
-   .lower_unpack_snorm_4x8 = true,
-   .lower_unpack_unorm_2x16 = true,
-   .lower_unpack_unorm_4x8 = true,
-   .vs_inputs_dual_locations = true,
-   .max_unroll_iterations = 32,
+   COMMON_SCALAR_OPTIONS,
 };
 
 static const struct nir_shader_compiler_options vector_nir_options = {
@@ -70,35 +83,13 @@ static const struct nir_shader_compiler_options vector_nir_options = {
     */
    .fdot_replicates = true,
 
-   /* Prior to Gen6, there are no three source operations for SIMD4x2. */
-   .lower_flrp32 = true,
-
    .lower_pack_snorm_2x16 = true,
    .lower_pack_unorm_2x16 = true,
    .lower_unpack_snorm_2x16 = true,
    .lower_unpack_unorm_2x16 = true,
    .lower_extract_byte = true,
    .lower_extract_word = true,
-   .vs_inputs_dual_locations = true,
-   .max_unroll_iterations = 32,
-};
-
-static const struct nir_shader_compiler_options vector_nir_options_gen6 = {
-   COMMON_OPTIONS,
-
-   /* In the vec4 backend, our dpN instruction replicates its result to all the
-    * components of a vec4.  We would like NIR to give us replicated fdot
-    * instructions because it can optimize better for us.
-    */
-   .fdot_replicates = true,
-
-   .lower_pack_snorm_2x16 = true,
-   .lower_pack_unorm_2x16 = true,
-   .lower_unpack_snorm_2x16 = true,
-   .lower_unpack_unorm_2x16 = true,
-   .lower_extract_byte = true,
-   .lower_extract_word = true,
-   .vs_inputs_dual_locations = true,
+   .intel_vec4 = true,
    .max_unroll_iterations = 32,
 };
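
The fdot_replicates flag relates to the dpN behaviour described in the comment above: the scalar dot-product result is broadcast to every channel of the destination. A conceptual C sketch (illustrative names, not the NIR API):

   static void
   fdot4_replicated_ref(float dst[4], const float a[4], const float b[4])
   {
      float d = a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
      dst[0] = dst[1] = dst[2] = dst[3] = d;   /* same scalar in all four components */
   }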
 
@@ -115,6 +106,10 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
 
    compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);
 
+   compiler->use_tcs_8_patch =
+      devinfo->gen >= 12 ||
+      (devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));
+
    if (devinfo->gen >= 10) {
       /* We don't support vec4 mode on Cannonlake. */
       for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
@@ -132,6 +127,44 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
       compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
    }
 
+   nir_lower_int64_options int64_options =
+      nir_lower_imul64 |
+      nir_lower_isign64 |
+      nir_lower_divmod64 |
+      nir_lower_imul_high64;
+   nir_lower_doubles_options fp64_options =
+      nir_lower_drcp |
+      nir_lower_dsqrt |
+      nir_lower_drsq |
+      nir_lower_dtrunc |
+      nir_lower_dfloor |
+      nir_lower_dceil |
+      nir_lower_dfract |
+      nir_lower_dround_even |
+      nir_lower_dmod |
+      nir_lower_dsub |
+      nir_lower_ddiv;
+
+   if (!devinfo->has_64bit_float || (INTEL_DEBUG & DEBUG_SOFT64)) {
+      int64_options |= nir_lower_mov64 |
+                       nir_lower_icmp64 |
+                       nir_lower_iadd64 |
+                       nir_lower_iabs64 |
+                       nir_lower_ineg64 |
+                       nir_lower_logic64 |
+                       nir_lower_minmax64 |
+                       nir_lower_shift64 |
+                       nir_lower_extract64;
+      fp64_options |= nir_lower_fp64_full_software;
+   }
+
+   /* The Bspec section titled "Instruction_multiply[DevBDW+]" claims that the
+    * destination type can be Quadword and the source type Doubleword on Gen8
+    * and Gen9, so lower the 64-bit multiply on the rest of the platforms.
+    */
+   if (devinfo->gen < 8 || devinfo->gen > 9)
+      int64_options |= nir_lower_imul_2x32_64;
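
In other words, where nir_lower_imul_2x32_64 is requested, a 32x32->64 multiply gets rebuilt from a 32-bit low product and a 32-bit high product. A rough C equivalent of the unsigned case (illustrative only, not the actual lowering pass):

   #include <stdint.h>

   static uint64_t
   umul_2x32_64_ref(uint32_t a, uint32_t b)
   {
      uint32_t lo = a * b;                                /* low 32 bits (umul)       */
      uint32_t hi = (uint32_t)(((uint64_t)a * b) >> 32);  /* high 32 bits (umul_high) */
      return ((uint64_t)hi << 32) | lo;
   }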
+
    /* We want the GLSL compiler to emit code that uses condition codes */
    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
       compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
@@ -147,14 +180,31 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
       compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
       compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;
 
+      struct nir_shader_compiler_options *nir_options =
+         rzalloc(compiler, struct nir_shader_compiler_options);
       if (is_scalar) {
-         compiler->glsl_compiler_options[i].NirOptions = &scalar_nir_options;
+         *nir_options = scalar_nir_options;
       } else {
-         compiler->glsl_compiler_options[i].NirOptions =
-            devinfo->gen < 6 ? &vector_nir_options : &vector_nir_options_gen6;
+         *nir_options = vector_nir_options;
       }
 
-      compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
+      /* Prior to Gen6, there are no three source operations, and Gen11 loses
+       * LRP.
+       */
+      nir_options->lower_ffma = devinfo->gen < 6;
+      nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
+      nir_options->lower_fpow = devinfo->gen >= 12;
+
+      nir_options->lower_rotate = devinfo->gen < 11;
+      nir_options->lower_bitfield_reverse = devinfo->gen < 7;
+
+      nir_options->lower_int64_options = int64_options;
+      nir_options->lower_doubles_options = fp64_options;
+
+      nir_options->unify_interfaces = i < MESA_SHADER_FRAGMENT;
+
+      compiler->glsl_compiler_options[i].NirOptions = nir_options;
+
       compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
    }
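
For reference, lower_ffma and lower_flrp32 simply ask NIR to expand those three-source opcodes into two-source arithmetic. Conceptual scalar equivalents (one common form; the real passes pick among algebraically equivalent expansions):

   static float ffma_ref(float a, float b, float c) { return a * b + c; }
   static float flrp_ref(float a, float b, float t) { return a * (1.0f - t) + b * t; }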
 
@@ -168,6 +218,33 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
    return compiler;
 }
 
+static void
+insert_u64_bit(uint64_t *val, bool add)
+{
+   *val = (*val << 1) | !!add;
+}
+
+uint64_t
+brw_get_compiler_config_value(const struct brw_compiler *compiler)
+{
+   uint64_t config = 0;
+   insert_u64_bit(&config, compiler->precise_trig);
+   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
+      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
+      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
+      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
+      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
+   }
+   uint64_t debug_bits = INTEL_DEBUG;
+   uint64_t mask = DEBUG_DISK_CACHE_MASK;
+   while (mask != 0) {
+      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
+      insert_u64_bit(&config, (debug_bits & bit) != 0);
+      mask &= ~bit;
+   }
+   return config;
+}
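
Usage sketch for insert_u64_bit(): each call shifts the accumulated key left by one and ORs in the new flag, so flags inserted first end up in the higher bit positions (values shown are illustrative):

   uint64_t key = 0;
   insert_u64_bit(&key, true);    /* key = 0b1   */
   insert_u64_bit(&key, false);   /* key = 0b10  */
   insert_u64_bit(&key, true);    /* key = 0b101 */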
+
 unsigned
 brw_prog_data_size(gl_shader_stage stage)
 {