#include "brw_nir.h"
#include "brw_shader.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
if ((mode == nir_var_shader_in && is_input(intrin)) ||
(mode == nir_var_shader_out && is_output(intrin))) {
nir_src *offset = nir_get_io_offset_src(intrin);
- nir_const_value *const_offset = nir_src_as_const_value(*offset);
- if (const_offset) {
- intrin->const_index[0] += const_offset->u32[0];
+ if (nir_src_is_const(*offset)) {
+ intrin->const_index[0] += nir_src_as_uint(*offset);
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));
nir_src *vertex = nir_get_io_vertex_index_src(intrin);
if (vertex) {
- nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
- if (const_vertex) {
- intrin->const_index[0] += const_vertex->u32[0] *
+ if (nir_src_is_const(*vertex)) {
+ intrin->const_index[0] += nir_src_as_uint(*vertex) *
vue_map->num_per_vertex_slots;
} else {
b->cursor = nir_before_instr(&intrin->instr);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
}
- OPT(nir_opt_if);
+ OPT(nir_opt_if, false);
if (nir->options->max_unroll_iterations != 0) {
OPT(nir_opt_loop_unroll, indirect_mask);
}
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
assert(alu->dest.dest.is_ssa);
- if (alu->dest.dest.ssa.bit_size != 16)
+ if (alu->dest.dest.ssa.bit_size >= 32)
return 0;
+ const struct brw_compiler *compiler = (const struct brw_compiler *) data;
+
switch (alu->op) {
case nir_op_idiv:
case nir_op_imod:
case nir_op_irem:
case nir_op_udiv:
case nir_op_umod:
+ case nir_op_fceil:
+ case nir_op_ffloor:
+ case nir_op_ffract:
+ case nir_op_fround_even:
+ case nir_op_ftrunc:
return 32;
+ case nir_op_frcp:
+ case nir_op_frsq:
+ case nir_op_fsqrt:
+ case nir_op_fpow:
+ case nir_op_fexp2:
+ case nir_op_flog2:
+ case nir_op_fsin:
+ case nir_op_fcos:
+ return compiler->devinfo->gen < 9 ? 32 : 0;
default:
return 0;
}
.lower_txb_shadow_clamp = true,
.lower_txd_shadow_clamp = true,
.lower_txd_offset_clamp = true,
+ .lower_tg4_offsets = true,
};
OPT(nir_lower_tex, &tex_options);
OPT(nir_opt_large_constants, NULL, 32);
}
- OPT(nir_lower_bit_size, lower_bit_size_callback, NULL);
+ OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
if (is_scalar) {
OPT(nir_lower_load_const_to_scalar);
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
+ /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
+ * SSBOs, our back-end is capable of loading an entire vec4 at a time and
+ * we would like to take advantage of that whenever possible regardless of
+ * whether or not the app gives us full loads. This should allow the
+ * optimizer to combine UBO and SSBO load operations and save us some send
+ * messages.
+ */
+ OPT(nir_lower_array_deref_of_vec,
+ nir_var_mem_ubo | nir_var_mem_ssbo,
+ nir_lower_direct_array_deref_of_vec_load);
+
/* Get rid of split copies */
nir = brw_nir_optimize(nir, compiler, is_scalar, false);
OPT(brw_nir_opt_peephole_ffma);
}
+ if (OPT(nir_opt_comparison_pre)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+
+ /* Do the select peephole again. nir_opt_comparison_pre (combined with
+ * the other optimization passes) will have removed at least one
+ * instruction from one of the branches of the if-statement, so now it
+ * might be under the threshold of conversion to bcsel.
+ *
+ * See brw_nir_optimize for the explanation of is_vec4_tessellation.
+ */
+ const bool is_vec4_tessellation = !is_scalar &&
+ (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL);
+ OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
+ OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
+ compiler->devinfo->gen >= 6);
+ }
+
OPT(nir_opt_algebraic_late);
+ OPT(brw_nir_lower_conversions);
+
OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);