/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "brw_eu.h"
#include "dev/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"

#define COMMON_OPTIONS \
   .lower_flrp16 = true, \
   .lower_bitfield_extract = true, \
   .lower_bitfield_insert = true, \
   .lower_uadd_carry = true, \
   .lower_usub_borrow = true, \
   .lower_flrp64 = true, \
   .lower_isign = true, \
   .lower_ldexp = true, \
   .lower_device_index_to_zero = true, \
   .vectorize_io = true, \
   .use_interpolated_input_intrinsics = true, \
   .vertex_id_zero_based = true, \
   .lower_base_vertex = true, \
   .use_scoped_barrier = true, \
   .support_8bit_alu = true, \
   .support_16bit_alu = true

#define COMMON_SCALAR_OPTIONS \
   .lower_to_scalar = true, \
   .lower_pack_half_2x16 = true, \
   .lower_pack_snorm_2x16 = true, \
   .lower_pack_snorm_4x8 = true, \
   .lower_pack_unorm_2x16 = true, \
   .lower_pack_unorm_4x8 = true, \
   .lower_unpack_half_2x16 = true, \
   .lower_unpack_snorm_2x16 = true, \
   .lower_unpack_snorm_4x8 = true, \
   .lower_unpack_unorm_2x16 = true, \
   .lower_unpack_unorm_4x8 = true, \
   .lower_usub_sat64 = true, \
   .lower_hadd64 = true, \
   .lower_bfe_with_two_constants = true, \
   .max_unroll_iterations = 32
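
/* Two NIR option tables follow: scalar_nir_options for stages compiled with
 * the scalar (FS) backend and vector_nir_options for stages compiled with
 * the vec4 backend.  Both build on COMMON_OPTIONS; the scalar table
 * additionally scalarizes ALU ops and lowers the pack/unpack opcodes.
 */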
static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,

   .max_unroll_iterations = 32,
};
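
/* Create and initialize a brw_compiler for the given device.  The compiler
 * is rzalloc'ed against mem_ctx, so freeing mem_ctx also frees the compiler
 * and everything (register sets, per-stage NIR option tables) allocated
 * against it.
 */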
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);
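
   /* 8_PATCH mode lets a TCS thread work on eight patches at once instead
    * of one.  It is unconditional on Gen12+ and opt-in through the
    * DEBUG_TCS_EIGHT_PATCH INTEL_DEBUG flag on Gen9-11.
    */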
   compiler->use_tcs_8_patch =
      devinfo->gen >= 12 ||
      (devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));
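
   /* Pick the scalar (FS) or vec4 backend per stage.  On Gen10+ everything
    * is scalar; on Gen8-9 the geometry stages default to scalar but can be
    * flipped back to vec4 with the INTEL_SCALAR_* environment variables for
    * debugging.  FS and CS are always scalar.
    */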
   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Cannonlake. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_ALL_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }
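
   /* Assemble the 64-bit integer and double lowering masks handed to NIR
    * below.  If the device has no 64-bit float support, or the DEBUG_SOFT64
    * flag is set, lower all int64 operations and fall back to the full
    * software fp64 implementation.
    */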
   nir_lower_int64_options int64_options =
      nir_lower_imul64 |
      nir_lower_isign64 |
      nir_lower_divmod64 |
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_drcp |
      nir_lower_dsqrt |
      nir_lower_drsq |
      nir_lower_dtrunc |
      nir_lower_dfloor |
      nir_lower_dceil |
      nir_lower_dfract |
      nir_lower_dround_even |
      nir_lower_dmod |
      nir_lower_dsub |
      nir_lower_ddiv;

   if (!devinfo->has_64bit_float || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= (nir_lower_int64_options)~0;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * the destination type can be Quadword and the source type Doubleword
    * only for Gen8 and Gen9, so lower 64-bit multiplies on all other
    * platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes. */
   for (int i = 0; i < MESA_ALL_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options = scalar_nir_options;
      } else {
         *nir_options = vector_nir_options;
      }

      /* Prior to Gen6, there are no three-source operations, and Gen11
       * loses LRP.
       */
      nir_options->lower_ffma = devinfo->gen < 6;
      nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
      nir_options->lower_fpow = devinfo->gen >= 12;

      nir_options->lower_rotate = devinfo->gen < 11;
      nir_options->lower_bitfield_reverse = devinfo->gen < 7;

      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;

      nir_options->unify_interfaces = i < MESA_SHADER_FRAGMENT;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }
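
   /* Tessellation stage I/O lives in the URB and is indexed per vertex, so
    * indirect addressing has to remain available there; relax the per-stage
    * defaults set in the loop above.
    */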
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
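
/* Shift one flag into the low bit of a running configuration word.  For
 * example, with *val == 0x2, insert_u64_bit(val, true) leaves *val == 0x5.
 */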
static void
insert_u64_bit(uint64_t *val, bool add)
{
   *val = (*val << 1) | !!add;
}
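
/* Pack every compiler option that affects code generation into one 64-bit
 * value: precise trig, the per-stage scalar/vec4 choices that are still
 * switchable on Gen8-9, and each INTEL_DEBUG bit covered by
 * DEBUG_DISK_CACHE_MASK.  A typical use (an assumption, not shown in this
 * file) is keying on-disk shader caches so binaries built under a different
 * configuration are not reused.
 */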
uint64_t
brw_get_compiler_config_value(const struct brw_compiler *compiler)
{
   uint64_t config = 0;
   insert_u64_bit(&config, compiler->precise_trig);
   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   }
   uint64_t debug_bits = INTEL_DEBUG;
   uint64_t mask = DEBUG_DISK_CACHE_MASK;
   while (mask != 0) {
      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
      insert_u64_bit(&config, (debug_bits & bit) != 0);
      mask &= ~bit;
   }
   return config;
}
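
/* Size of the stage-specific prog_data structure, e.g. for allocating or
 * copying program metadata generically by stage.  Note that compute and
 * kernel stages share brw_cs_prog_data.
 */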
unsigned
brw_prog_data_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      [MESA_SHADER_VERTEX]    = sizeof(struct brw_vs_prog_data),
      [MESA_SHADER_TESS_CTRL] = sizeof(struct brw_tcs_prog_data),
      [MESA_SHADER_TESS_EVAL] = sizeof(struct brw_tes_prog_data),
      [MESA_SHADER_GEOMETRY]  = sizeof(struct brw_gs_prog_data),
      [MESA_SHADER_FRAGMENT]  = sizeof(struct brw_wm_prog_data),
      [MESA_SHADER_COMPUTE]   = sizeof(struct brw_cs_prog_data),
      [MESA_SHADER_KERNEL]    = sizeof(struct brw_cs_prog_data),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}
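
/* Size of the stage-specific program key, the struct hashed to look up
 * previously-compiled shader variants.
 */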
unsigned
brw_prog_key_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      [MESA_SHADER_VERTEX]    = sizeof(struct brw_vs_prog_key),
      [MESA_SHADER_TESS_CTRL] = sizeof(struct brw_tcs_prog_key),
      [MESA_SHADER_TESS_EVAL] = sizeof(struct brw_tes_prog_key),
      [MESA_SHADER_GEOMETRY]  = sizeof(struct brw_gs_prog_key),
      [MESA_SHADER_FRAGMENT]  = sizeof(struct brw_wm_prog_key),
      [MESA_SHADER_COMPUTE]   = sizeof(struct brw_cs_prog_key),
      [MESA_SHADER_KERNEL]    = sizeof(struct brw_cs_prog_key),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}
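
/* Patch relocations in an assembled shader: for each relocation recorded in
 * prog_data, find the caller-supplied value with a matching id and rewrite
 * the instruction immediate at that offset.
 */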
void
brw_write_shader_relocs(const struct gen_device_info *devinfo,
                        void *program,
                        const struct brw_stage_prog_data *prog_data,
                        struct brw_shader_reloc_value *values,
                        unsigned num_values)
{
   for (unsigned i = 0; i < prog_data->num_relocs; i++) {
      assert(prog_data->relocs[i].offset % 8 == 0);
      brw_inst *inst =
         (brw_inst *)(program + prog_data->relocs[i].offset);
      for (unsigned j = 0; j < num_values; j++) {
         if (prog_data->relocs[i].id == values[j].id) {
            brw_update_reloc_imm(devinfo, inst, values[j].value);
            break;
         }
      }
   }
}