/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "brw_eu.h"
#include "dev/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"

#define COMMON_OPTIONS \
   .lower_flrp16 = true, \
   .lower_bitfield_extract = true, \
   .lower_bitfield_insert = true, \
   .lower_uadd_carry = true, \
   .lower_usub_borrow = true, \
   .lower_flrp64 = true, \
   .lower_isign = true, \
   .lower_ldexp = true, \
   .lower_device_index_to_zero = true, \
   .vectorize_io = true, \
   .use_interpolated_input_intrinsics = true, \
   .vertex_id_zero_based = true, \
   .lower_base_vertex = true

#define COMMON_SCALAR_OPTIONS \
   .lower_to_scalar = true, \
   .lower_pack_half_2x16 = true, \
   .lower_pack_snorm_2x16 = true, \
   .lower_pack_snorm_4x8 = true, \
   .lower_pack_unorm_2x16 = true, \
   .lower_pack_unorm_4x8 = true, \
   .lower_unpack_half_2x16 = true, \
   .lower_unpack_snorm_2x16 = true, \
   .lower_unpack_snorm_4x8 = true, \
   .lower_unpack_unorm_2x16 = true, \
   .lower_unpack_unorm_4x8 = true, \
   .lower_usub_sat64 = true, \
   .lower_hadd64 = true, \
   .max_unroll_iterations = 32
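
/* COMMON_OPTIONS holds the NIR options shared by both backends;
 * COMMON_SCALAR_OPTIONS adds what only the scalar backend wants: full
 * scalarization plus lowering of the 2x16/4x8 pack and unpack helpers.
 */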

static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all the
    * components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,

   .max_unroll_iterations = 32,
};
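
/* Build the compiler for one device.  Everything is rzalloc'ed out of
 * mem_ctx (and the per-stage NIR option structs off the compiler itself),
 * so freeing mem_ctx releases it all.
 */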
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);

   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   compiler->use_tcs_8_patch =
      devinfo->gen >= 12 ||
      (devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));

   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Cannonlake. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }
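
   /* Decide which 64-bit integer and double-precision operations NIR must
    * lower for us.  The baseline sets below are widened when the device has
    * no native 64-bit float support or when INTEL_DEBUG forces the soft64
    * path.
    */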
   nir_lower_int64_options int64_options =
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_dround_even;

   if (!devinfo->has_64bit_float || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= nir_lower_mov64;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * destination type can be Quadword and source type Doubleword for Gen8 and
    * Gen9.  So, lower 64-bit multiply instructions on the rest of the
    * platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options = scalar_nir_options;
      } else {
         *nir_options = vector_nir_options;
      }

      /* Prior to Gen6, there are no three source operations, and Gen11 loses
       * LRP.
       */
      nir_options->lower_ffma = devinfo->gen < 6;
      nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
      nir_options->lower_fpow = devinfo->gen >= 12;

      nir_options->lower_rotate = devinfo->gen < 11;
      nir_options->lower_bitfield_reverse = devinfo->gen < 7;

      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;

      nir_options->unify_interfaces = i < MESA_SHADER_FRAGMENT;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
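
/* A minimal usage sketch (hypothetical caller; the ralloc context and the
 * devinfo are assumed to be set up by the driver, this is not code from
 * this file):
 *
 *    void *mem_ctx = ralloc_context(NULL);
 *    struct brw_compiler *compiler = brw_compiler_create(mem_ctx, devinfo);
 *    ...compile shaders with compiler->glsl_compiler_options[stage]...
 *    ralloc_free(mem_ctx);
 */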

static void
insert_u64_bit(uint64_t *val, bool add)
{
   *val = (*val << 1) | !!add;
}
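
/* Fold the compiler options that affect generated code, plus the INTEL_DEBUG
 * bits covered by DEBUG_DISK_CACHE_MASK, into one value.  Callers (the disk
 * shader cache, for instance) can mix this into their keys so that cached
 * binaries are not reused across configuration changes.
 */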
uint64_t
brw_get_compiler_config_value(const struct brw_compiler *compiler)
{
   uint64_t config = 0;
   insert_u64_bit(&config, compiler->precise_trig);
   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   }
   uint64_t debug_bits = INTEL_DEBUG;
   uint64_t mask = DEBUG_DISK_CACHE_MASK;
   while (mask != 0) {
      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
      insert_u64_bit(&config, (debug_bits & bit) != 0);
      mask &= ~bit;
   }
   return config;
}
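
/* Size in bytes of the stage-specific brw_*_prog_data structure.  The table
 * below is indexed directly by gl_shader_stage, which is what the
 * STATIC_ASSERTs guard.
 */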
unsigned
brw_prog_data_size(gl_shader_stage stage)
{
   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
   STATIC_ASSERT(MESA_SHADER_TESS_CTRL == 1);
   STATIC_ASSERT(MESA_SHADER_TESS_EVAL == 2);
   STATIC_ASSERT(MESA_SHADER_GEOMETRY == 3);
   STATIC_ASSERT(MESA_SHADER_FRAGMENT == 4);
   STATIC_ASSERT(MESA_SHADER_COMPUTE == 5);
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_data),
      sizeof(struct brw_tcs_prog_data),
      sizeof(struct brw_tes_prog_data),
      sizeof(struct brw_gs_prog_data),
      sizeof(struct brw_wm_prog_data),
      sizeof(struct brw_cs_prog_data),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}
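
/* Size in bytes of the stage-specific brw_*_prog_key structure, again indexed
 * directly by gl_shader_stage.
 */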
unsigned
brw_prog_key_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_key),
      sizeof(struct brw_tcs_prog_key),
      sizeof(struct brw_tes_prog_key),
      sizeof(struct brw_gs_prog_key),
      sizeof(struct brw_wm_prog_key),
      sizeof(struct brw_cs_prog_key),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}