nir_const_value *const_offset = nir_src_as_const_value(*offset);
if (const_offset) {
- intrin->const_index[0] += const_offset->u[0];
+ intrin->const_index[0] += const_offset->u32[0];
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));
if (vertex) {
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
- intrin->const_index[0] += const_vertex->u[0] *
+ intrin->const_index[0] += const_vertex->u32[0] *
state->vue_map->num_per_vertex_slots;
} else {
state->b.cursor = nir_before_instr(&intrin->instr);
* is_scalar = true to scalarize everything prior to code gen.
*/
/* NOTE(review): this span is a unified-diff fragment, not plain C — the
 * '-'/'+' prefixed lines are patch markers and are preserved verbatim, and
 * the body is stitched from multiple hunks (context lines are missing
 * between them).  Comments below describe only what the visible lines show.
 *
 * brw_preprocess_nir: runs backend-agnostic NIR lowering/optimization
 * passes on `nir` and returns the (possibly reallocated) shader.  The diff
 * changes the interface: instead of the caller passing `is_scalar`
 * explicitly, the function now takes the brw_compiler and derives
 * scalar-ness per stage from compiler->scalar_stage[nir->stage].
 */
nir_shader *
-brw_preprocess_nir(nir_shader *nir, bool is_scalar)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;
   /* Replaces the removed parameter: per-stage scalar/vec4 selection now
    * comes from the compiler object (new line added by the diff). */
+   const bool is_scalar = compiler->scalar_stage[nir->stage];
+
   /* GS-only lowering of geometry-shader intrinsics. */
   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);
   /* New in this diff: apply trig workarounds when the compiler requests
    * precise trigonometry — presumably a hardware-accuracy workaround pass;
    * TODO confirm against brw_nir_apply_trig_workarounds' definition. */
+   if (compiler->precise_trig)
+      OPT(brw_nir_apply_trig_workarounds);
+
   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };
   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);
   /* Diff narrows dead-variable removal to local variables only, so
    * inputs/outputs/uniforms survive this pass. */
-   OPT(nir_remove_dead_variables);
+   OPT(nir_remove_dead_variables, nir_var_local);
   return nir;
}
(void)progress;
- nir = brw_preprocess_nir(nir, is_scalar);
+ nir = brw_preprocess_nir(brw->intelScreen->compiler, nir);
OPT(nir_lower_system_values);
OPT_V(brw_nir_lower_uniforms, is_scalar);