/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "brw_eu.h" /* for brw_init_compaction_tables() */
#include "common/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"
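
/* NIR compiler options shared by every generation and backend; each of the
 * backend-specific option structs below splices these in via COMMON_OPTIONS.
 */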
#define COMMON_OPTIONS                              \
   .lower_fmod32 = true,                            \
   .lower_fmod64 = false,                           \
   .lower_bitfield_extract = true,                  \
   .lower_bitfield_insert = true,                   \
   .lower_uadd_carry = true,                        \
   .lower_usub_borrow = true,                       \
   .lower_flrp64 = true,                            \
   .native_integers = true,                         \
   .use_interpolated_input_intrinsics = true,       \
   .vertex_id_zero_based = true
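
/* NIR options for stages compiled with the scalar backend (see the
 * scalar_stage[] setup in brw_compiler_create() below).
 */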
static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   .lower_pack_half_2x16 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .max_unroll_iterations = 32,
};
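
/* NIR options for the vec4 backend on Gen4-5 hardware; Gen6 and later use
 * the variant that follows.
 */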
static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all the
    * components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   /* Prior to Gen6, there are no three source operations for SIMD4x2. */
   .lower_flrp32 = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_vote_trivial = true,
   .max_unroll_iterations = 32,
};
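
/* The Gen6+ vec4 variant: flrp is a native three-source operation from Gen6
 * onward, so it is not lowered here.
 */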
static const struct nir_shader_compiler_options vector_nir_options_gen6 = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all the
    * components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .max_unroll_iterations = 32,
};
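
/* Create and configure a brw_compiler for the given device.  The compiler is
 * allocated out of mem_ctx with rzalloc(), so it is freed automatically when
 * mem_ctx is freed.
 */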
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
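
   /* Pre-build the register allocation sets for the scalar (FS) and vec4
    * backends, and the instruction compaction tables for this device
    * generation.
    */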
   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);
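
   /* INTEL_PRECISE_TRIG=1 asks the backends to prefer accuracy over
    * performance in the trigonometry instructions.
    */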
   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Cannonlake. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
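
      /* In scalar stages, have the GLSL frontend remove indirect addressing
       * of outputs and temporaries; vec4 stages keep it and are optimized
       * for AOS layout instead.
       */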
      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      if (is_scalar) {
         compiler->glsl_compiler_options[i].NirOptions = &scalar_nir_options;
      } else {
         compiler->glsl_compiler_options[i].NirOptions =
            devinfo->gen < 6 ? &vector_nir_options : &vector_nir_options_gen6;
      }

      compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }
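
   /* The tessellation stages address their inputs (and TCS outputs) through
    * the URB, where the backends support indirect reads, so the blanket
    * EmitNoIndirectInput set above is relaxed for them.
    */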
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;
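
   /* A scalar GS likewise reads its inputs from the URB and can index them
    * indirectly.
    */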
   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}