.lower_fdiv = true, \
.lower_scmp = true, \
.lower_flrp16 = true, \
- .lower_fmod16 = true, \
- .lower_fmod32 = true, \
- .lower_fmod64 = false, \
+ .lower_fmod = true, \
.lower_bitfield_extract = true, \
.lower_bitfield_insert = true, \
.lower_uadd_carry = true, \
.lower_isign = true, \
.lower_ldexp = true, \
.lower_device_index_to_zero = true, \
+ .vectorize_io = true, \
.use_interpolated_input_intrinsics = true, \
.vertex_id_zero_based = true, \
.lower_base_vertex = true
compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);
+ compiler->use_tcs_8_patch =
+ devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH);
+
if (devinfo->gen >= 10) {
/* We don't support vec4 mode on Cannonlake. */
for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
nir_options->lower_ffma = devinfo->gen < 6;
nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
+ nir_options->lower_rotate = devinfo->gen < 11;
+
nir_options->lower_int64_options = int64_options;
nir_options->lower_doubles_options = fp64_options;
compiler->glsl_compiler_options[i].NirOptions = nir_options;
assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
return stage_sizes[stage];
}
+
+/* Store the shader's program_string_id into the stage-specific prog key
+ * held in the brw_any_prog_key union.
+ *
+ * Each per-stage key struct (vs/tcs/tes/gs/wm/cs) carries its own
+ * program_string_id member at a potentially different offset, so the
+ * write goes through an offsetof() lookup table indexed by
+ * gl_shader_stage rather than a switch.
+ *
+ * NOTE(review): table order assumes gl_shader_stage enumerates
+ * VERTEX, TESS_CTRL, TESS_EVAL, GEOMETRY, FRAGMENT, COMPUTE as 0..5 —
+ * confirm against the gl_shader_stage enum definition.
+ */
+void
+brw_prog_key_set_id(union brw_any_prog_key *key,
+ gl_shader_stage stage,
+ unsigned id)
+{
+ static const unsigned stage_offsets[] = {
+ offsetof(struct brw_vs_prog_key, program_string_id),
+ offsetof(struct brw_tcs_prog_key, program_string_id),
+ offsetof(struct brw_tes_prog_key, program_string_id),
+ offsetof(struct brw_gs_prog_key, program_string_id),
+ offsetof(struct brw_wm_prog_key, program_string_id),
+ offsetof(struct brw_cs_prog_key, program_string_id),
+ };
+ /* Reject out-of-range stages before indexing the table. */
+ assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_offsets));
+ /* Byte-offset into the union, then store the id as an unsigned. */
+ *(unsigned*)((uint8_t*)key + stage_offsets[stage]) = id;
+}