/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "common/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"
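
/* NIR compiler options shared by every hardware generation.  Each .lower_*
 * flag asks NIR to lower the corresponding operation into something the
 * backend can emit directly.
 */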
#define COMMON_OPTIONS \
   .lower_flrp16 = true, \
   .lower_fmod16 = true, \
   .lower_fmod32 = true, \
   .lower_fmod64 = false, \
   .lower_bitfield_extract = true, \
   .lower_bitfield_insert = true, \
   .lower_uadd_carry = true, \
   .lower_usub_borrow = true, \
   .lower_flrp64 = true, \
   .lower_isign = true, \
   .lower_ldexp = true, \
   .lower_cs_local_id_from_index = true, \
   .lower_device_index_to_zero = true, \
   .native_integers = true, \
   .use_interpolated_input_intrinsics = true, \
   .vertex_id_zero_based = true, \
   .lower_base_vertex = true

#define COMMON_SCALAR_OPTIONS \
   .lower_pack_half_2x16 = true, \
   .lower_pack_snorm_2x16 = true, \
   .lower_pack_snorm_4x8 = true, \
   .lower_pack_unorm_2x16 = true, \
   .lower_pack_unorm_4x8 = true, \
   .lower_unpack_half_2x16 = true, \
   .lower_unpack_snorm_2x16 = true, \
   .lower_unpack_snorm_4x8 = true, \
   .lower_unpack_unorm_2x16 = true, \
   .lower_unpack_unorm_4x8 = true, \
   .max_unroll_iterations = 32

static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options scalar_nir_options_gen11 = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   /* Prior to Gen6, there are no three source operations for SIMD4x2. */
   .lower_flrp32 = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .max_unroll_iterations = 32,
};

static const struct nir_shader_compiler_options vector_nir_options_gen6 = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .max_unroll_iterations = 32,
};

struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);

   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);
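
   /* Choose which stages use the scalar (NIR/FS) backend.  Gen10+ is
    * scalar-only.  Gen8/9 default to scalar for every stage, but the
    * INTEL_SCALAR_* environment variables can switch individual geometry
    * stages back to the vec4 backend; before Gen8 those stages are always
    * vec4.
    */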
   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Cannonlake. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }

   nir_lower_int64_options int64_options =
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_dround_even |
      nir_lower_dmod;

   if (!devinfo->has_64bit_types || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= nir_lower_mov64;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * the destination type can be Quadword and the source type Doubleword for
    * Gen8 and Gen9.  So, lower the 64-bit multiply instruction on all other
    * platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;
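
      /* Each stage gets its own copy of the generation-appropriate NIR
       * options so that the 64-bit lowering flags computed above can be
       * applied to it.
       */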
      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options =
            devinfo->gen < 11 ? scalar_nir_options : scalar_nir_options_gen11;
      } else {
         *nir_options =
            devinfo->gen < 6 ? vector_nir_options : vector_nir_options_gen6;
      }
      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;
      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
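
/* Shift one boolean into the low bit of *val.  Successive calls build up a
 * compact bit-vector; bits inserted earlier end up in higher positions.
 */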
static void
insert_u64_bit(uint64_t *val, bool add)
{
   *val = (*val << 1) | !!add;
}
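
/* Pack the compiler options that can affect generated code into a single
 * 64-bit value, one bit per option.  The DEBUG_DISK_CACHE_MASK bits folded
 * in below suggest this value is meant to key/invalidate the on-disk shader
 * cache when the compiler configuration changes.
 */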
uint64_t
brw_get_compiler_config_value(const struct brw_compiler *compiler)
{
   uint64_t config = 0;
   insert_u64_bit(&config, compiler->precise_trig);
   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   }

   uint64_t debug_bits = INTEL_DEBUG;
   uint64_t mask = DEBUG_DISK_CACHE_MASK;
   while (mask != 0) {
      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
      insert_u64_bit(&config, (debug_bits & bit) != 0);
      mask &= ~bit;
   }

   return config;
}
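
/* Size of the stage-specific brw_*_prog_data structure, indexed by
 * gl_shader_stage.
 */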
unsigned
brw_prog_data_size(gl_shader_stage stage)
{
   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
   STATIC_ASSERT(MESA_SHADER_TESS_CTRL == 1);
   STATIC_ASSERT(MESA_SHADER_TESS_EVAL == 2);
   STATIC_ASSERT(MESA_SHADER_GEOMETRY == 3);
   STATIC_ASSERT(MESA_SHADER_FRAGMENT == 4);
   STATIC_ASSERT(MESA_SHADER_COMPUTE == 5);
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_data),
      sizeof(struct brw_tcs_prog_data),
      sizeof(struct brw_tes_prog_data),
      sizeof(struct brw_gs_prog_data),
      sizeof(struct brw_wm_prog_data),
      sizeof(struct brw_cs_prog_data),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}
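
/* Size of the stage-specific brw_*_prog_key structure, indexed by
 * gl_shader_stage.
 */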
unsigned
brw_prog_key_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_key),
      sizeof(struct brw_tcs_prog_key),
      sizeof(struct brw_tes_prog_key),
      sizeof(struct brw_gs_prog_key),
      sizeof(struct brw_wm_prog_key),
      sizeof(struct brw_cs_prog_key),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}