intel/compiler: Allow MESA_SHADER_KERNEL
src/intel/compiler/brw_compiler.c
/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "brw_eu.h"
#include "dev/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"

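/* NIR compiler options shared by the scalar (FS) and vector (vec4)
 * backends.  Each .lower_* flag asks NIR to rewrite the corresponding
 * operation into simpler ALU ops that the Intel backends can emit
 * directly.
 */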
#define COMMON_OPTIONS \
   .lower_sub = true, \
   .lower_fdiv = true, \
   .lower_scmp = true, \
   .lower_flrp16 = true, \
   .lower_fmod = true, \
   .lower_bitfield_extract = true, \
   .lower_bitfield_insert = true, \
   .lower_uadd_carry = true, \
   .lower_usub_borrow = true, \
   .lower_flrp64 = true, \
   .lower_isign = true, \
   .lower_ldexp = true, \
   .lower_device_index_to_zero = true, \
   .vectorize_io = true, \
   .use_interpolated_input_intrinsics = true, \
   .vertex_id_zero_based = true, \
   .lower_base_vertex = true, \
   .use_scoped_barrier = true, \
   .support_8bit_alu = true, \
   .support_16bit_alu = true

#define COMMON_SCALAR_OPTIONS \
   .lower_to_scalar = true, \
   .lower_pack_half_2x16 = true, \
   .lower_pack_snorm_2x16 = true, \
   .lower_pack_snorm_4x8 = true, \
   .lower_pack_unorm_2x16 = true, \
   .lower_pack_unorm_4x8 = true, \
   .lower_unpack_half_2x16 = true, \
   .lower_unpack_snorm_2x16 = true, \
   .lower_unpack_snorm_4x8 = true, \
   .lower_unpack_unorm_2x16 = true, \
   .lower_unpack_unorm_4x8 = true, \
   .lower_usub_sat64 = true, \
   .lower_hadd64 = true, \
   .lower_bfe_with_two_constants = true, \
   .max_unroll_iterations = 32

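/* Extra lowering for the scalar backend, which operates on individual
 * components rather than whole vec4s: pack/unpack and other
 * vector-at-once operations have to be split into scalar code.
 */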
static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .intel_vec4 = true,
   .max_unroll_iterations = 32,
};

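/* Create a brw_compiler for the given device.  The compiler is
 * rzalloc'ed against mem_ctx, so freeing mem_ctx frees the compiler and
 * everything allocated from it.
 */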
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);

   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   compiler->use_tcs_8_patch =
      devinfo->gen >= 12 ||
      (devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));

   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Gen10+ (Cannonlake and later), so
       * every stage is scalar.
       */
      for (int i = MESA_SHADER_VERTEX; i < MESA_ALL_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }

   nir_lower_int64_options int64_options =
      nir_lower_imul64 |
      nir_lower_isign64 |
      nir_lower_divmod64 |
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_drcp |
      nir_lower_dsqrt |
      nir_lower_drsq |
      nir_lower_dtrunc |
      nir_lower_dfloor |
      nir_lower_dceil |
      nir_lower_dfract |
      nir_lower_dround_even |
      nir_lower_dmod |
      nir_lower_dsub |
      nir_lower_ddiv;

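   /* Without native 64-bit float support (or with the DEBUG_SOFT64 flag
    * forced via INTEL_DEBUG), lower every 64-bit integer operation and
    * emulate doubles entirely in software.
    */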
   if (!devinfo->has_64bit_float || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= (nir_lower_int64_options)~0;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * the destination type can be Quadword and the source type Doubleword on
    * Gen8 and Gen9.  So, lower the 64-bit multiply on all other platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_ALL_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options = scalar_nir_options;
      } else {
         *nir_options = vector_nir_options;
      }

      /* Prior to Gen6, there are no three source operations, and Gen11 loses
       * LRP.
       */
      nir_options->lower_ffma = devinfo->gen < 6;
      nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
      nir_options->lower_fpow = devinfo->gen >= 12;

      nir_options->lower_rotate = devinfo->gen < 11;
      nir_options->lower_bitfield_reverse = devinfo->gen < 7;

      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;

      nir_options->unify_interfaces = i < MESA_SHADER_FRAGMENT;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

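   /* Tessellation stages rely on dynamically indexed per-vertex I/O, so
    * leave indirect addressing of those inputs (and TCS outputs) intact
    * rather than asking GLSL IR to lower it away.
    */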
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}

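/* Shift *val left by one and put 'add' in the new low bit.  Repeated
 * calls build up a bitmask one flag at a time, with the most recently
 * inserted flag in the least significant bit.
 */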
static void
insert_u64_bit(uint64_t *val, bool add)
{
   *val = (*val << 1) | !!add;
}

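/* Pack the compiler configuration that affects generated code (precise
 * trig, the per-stage vec4 vs. scalar choices, and the INTEL_DEBUG bits
 * in DEBUG_DISK_CACHE_MASK) into one value, so callers can mix it into
 * shader disk cache keys and avoid reusing stale binaries when the
 * configuration changes.
 */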
uint64_t
brw_get_compiler_config_value(const struct brw_compiler *compiler)
{
   uint64_t config = 0;
   insert_u64_bit(&config, compiler->precise_trig);
   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   }
   uint64_t debug_bits = INTEL_DEBUG;
   uint64_t mask = DEBUG_DISK_CACHE_MASK;
   while (mask != 0) {
      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
      insert_u64_bit(&config, (debug_bits & bit) != 0);
      mask &= ~bit;
   }
   return config;
}

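/* Size of the stage-specific prog_data structure, so callers can
 * allocate or copy one without knowing the concrete type.  Note that
 * MESA_SHADER_KERNEL (OpenCL-style kernels) goes through the compute
 * path and shares brw_cs_prog_data.
 */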
unsigned
brw_prog_data_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      [MESA_SHADER_VERTEX]    = sizeof(struct brw_vs_prog_data),
      [MESA_SHADER_TESS_CTRL] = sizeof(struct brw_tcs_prog_data),
      [MESA_SHADER_TESS_EVAL] = sizeof(struct brw_tes_prog_data),
      [MESA_SHADER_GEOMETRY]  = sizeof(struct brw_gs_prog_data),
      [MESA_SHADER_FRAGMENT]  = sizeof(struct brw_wm_prog_data),
      [MESA_SHADER_COMPUTE]   = sizeof(struct brw_cs_prog_data),
      [MESA_SHADER_KERNEL]    = sizeof(struct brw_cs_prog_data),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}

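/* Likewise for the shader key; MESA_SHADER_KERNEL again shares the
 * compute shader key.
 */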
unsigned
brw_prog_key_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      [MESA_SHADER_VERTEX]    = sizeof(struct brw_vs_prog_key),
      [MESA_SHADER_TESS_CTRL] = sizeof(struct brw_tcs_prog_key),
      [MESA_SHADER_TESS_EVAL] = sizeof(struct brw_tes_prog_key),
      [MESA_SHADER_GEOMETRY]  = sizeof(struct brw_gs_prog_key),
      [MESA_SHADER_FRAGMENT]  = sizeof(struct brw_wm_prog_key),
      [MESA_SHADER_COMPUTE]   = sizeof(struct brw_cs_prog_key),
      [MESA_SHADER_KERNEL]    = sizeof(struct brw_cs_prog_key),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}