#include "brw_nir.h"
#include "brw_shader.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
-static bool
-is_input(nir_intrinsic_instr *intrin)
-{
- return intrin->intrinsic == nir_intrinsic_load_input ||
- intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
- intrin->intrinsic == nir_intrinsic_load_interpolated_input;
-}
-
-static bool
-is_output(nir_intrinsic_instr *intrin)
-{
- return intrin->intrinsic == nir_intrinsic_load_output ||
- intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
- intrin->intrinsic == nir_intrinsic_store_output ||
- intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
-}
-
-/**
- * In many cases, we just add the base and offset together, so there's no
- * reason to keep them separate. Sometimes, combining them is essential:
- * if a shader only accesses part of a compound variable (such as a matrix
- * or array), the variable's base may not actually exist in the VUE map.
- *
- * This pass adds constant offsets to instr->const_index[0], and resets
- * the offset source to 0. Non-constant offsets remain unchanged - since
- * we don't know what part of a compound variable is accessed, we allocate
- * storage for the entire thing.
- */
-
-static bool
-add_const_offset_to_base_block(nir_block *block, nir_builder *b,
- nir_variable_mode mode)
-{
- nir_foreach_instr_safe(instr, block) {
- if (instr->type != nir_instr_type_intrinsic)
- continue;
-
- nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
- if ((mode == nir_var_shader_in && is_input(intrin)) ||
- (mode == nir_var_shader_out && is_output(intrin))) {
- nir_src *offset = nir_get_io_offset_src(intrin);
- nir_const_value *const_offset = nir_src_as_const_value(*offset);
-
- if (const_offset) {
- intrin->const_index[0] += const_offset->u32[0];
- b->cursor = nir_before_instr(&intrin->instr);
- nir_instr_rewrite_src(&intrin->instr, offset,
- nir_src_for_ssa(nir_imm_int(b, 0)));
- }
- }
- }
- return true;
-}
-
-static void
-add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
-{
- nir_foreach_function(f, nir) {
- if (f->impl) {
- nir_builder b;
- nir_builder_init(&b, f->impl);
- nir_foreach_block(block, f->impl) {
- add_const_offset_to_base_block(block, &b, mode);
- }
- }
- }
-}
-
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
GLenum primitive_mode)
return true;
}
+static bool
+is_input(nir_intrinsic_instr *intrin)
+{
+ return intrin->intrinsic == nir_intrinsic_load_input ||
+ intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
+ intrin->intrinsic == nir_intrinsic_load_interpolated_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+ return intrin->intrinsic == nir_intrinsic_load_output ||
+ intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+ intrin->intrinsic == nir_intrinsic_store_output ||
+ intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
+
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map,
nir_src *vertex = nir_get_io_vertex_index_src(intrin);
if (vertex) {
- nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
- if (const_vertex) {
- intrin->const_index[0] += const_vertex->u32[0] *
+ if (nir_src_is_const(*vertex)) {
+ intrin->const_index[0] += nir_src_as_uint(*vertex) *
vue_map->num_per_vertex_slots;
} else {
b->cursor = nir_before_instr(&intrin->instr);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (!function->impl)
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (function->impl) {
lower_io_options |= nir_lower_io_force_sample_interpolation;
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
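+ /* Gen11 removes the PLN instruction, so lower interpolated input loads
+ * into explicit barycentric loads plus ALU math that the backend can
+ * emit directly.
+ */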
+ if (devinfo->gen >= 11)
+ nir_lower_interpolation(nir, ~0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}
void
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_out);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
nir_foreach_function(function, nir) {
if (function->impl) {
return indirect_mask;
}
-nir_shader *
+void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar, bool allow_copies)
{
brw_nir_no_indirect_mask(compiler, nir->info.stage);
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
+
do {
progress = false;
OPT(nir_split_array_vars, nir_var_function_temp);
}
OPT(nir_opt_copy_prop_vars);
OPT(nir_opt_dead_write_vars);
+ OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar);
+ OPT(nir_lower_alu_to_scalar, NULL);
}
OPT(nir_copy_prop);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
+ OPT(nir_opt_combine_stores, nir_var_all);
/* Passing 0 to the peephole select pass causes it to convert
* if-statements that contain only move instructions in the branches
OPT(nir_opt_idiv_const, 32);
OPT(nir_opt_algebraic);
OPT(nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ /* always_precise stays false to match the old lowering behavior; the
+ * final argument tells the pass whether ffma is available (Gen6 and
+ * newer).
+ */
+ if (OPT(nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ compiler->devinfo->gen >= 6)) {
+ OPT(nir_opt_constant_folding);
+ }
+
+ /* Nothing should rematerialize any flrps, so we only need to do this
+ * lowering once.
+ */
+ lower_flrp = 0;
+ }
+
OPT(nir_opt_dead_cf);
if (OPT(nir_opt_trivial_continues)) {
/* If nir_opt_trivial_continues makes progress, then we need to clean
OPT(nir_copy_prop);
OPT(nir_opt_dce);
}
- OPT(nir_opt_if);
+ OPT(nir_opt_if, false);
if (nir->options->max_unroll_iterations != 0) {
OPT(nir_opt_loop_unroll, indirect_mask);
}
* assert in the opt_large_constants pass.
*/
OPT(nir_remove_dead_variables, nir_var_function_temp);
-
- return nir;
}
static unsigned
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
assert(alu->dest.dest.is_ssa);
- if (alu->dest.dest.ssa.bit_size != 16)
+ if (alu->dest.dest.ssa.bit_size >= 32)
return 0;
+ const struct brw_compiler *compiler = (const struct brw_compiler *) data;
+
switch (alu->op) {
case nir_op_idiv:
case nir_op_imod:
case nir_op_irem:
case nir_op_udiv:
case nir_op_umod:
+ case nir_op_fceil:
+ case nir_op_ffloor:
+ case nir_op_ffract:
+ case nir_op_fround_even:
+ case nir_op_ftrunc:
return 32;
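+ /* Lower 16-bit transcendentals to 32 bits on Gen8 and earlier; Gen9+
+ * handles them natively.
+ */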
+ case nir_op_frcp:
+ case nir_op_frsq:
+ case nir_op_fsqrt:
+ case nir_op_fpow:
+ case nir_op_fexp2:
+ case nir_op_flog2:
+ case nir_op_fsin:
+ case nir_op_fcos:
+ return compiler->devinfo->gen < 9 ? 32 : 0;
default:
return 0;
}
* intended for the FS backend as long as nir_optimize is called again with
* is_scalar = true to scalarize everything prior to code gen.
*/
-nir_shader *
+void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
const nir_shader *softfp64)
{
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar);
+ OPT(nir_lower_alu_to_scalar, NULL);
}
- /* Run opt_algebraic before int64 lowering so we can hopefully get rid
- * of some int64 instructions.
- */
- OPT(nir_opt_algebraic);
-
- /* Lower 64-bit operations before nir_optimize so that loop unrolling sees
- * their actual cost.
- */
- bool lowered_64bit_ops = false;
- do {
- progress = false;
-
- OPT(nir_lower_int64, nir->options->lower_int64_options);
- OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
-
- /* Necessary to lower add -> sub and div -> mul/rcp */
- OPT(nir_opt_algebraic);
-
- lowered_64bit_ops |= progress;
- } while (progress);
-
if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
.lower_txp = ~0,
.lower_txf_offset = true,
.lower_rect_offset = true,
+ .lower_tex_without_implicit_lod = true,
.lower_txd_cube_map = true,
.lower_txb_shadow_clamp = true,
.lower_txd_shadow_clamp = true,
.lower_txd_offset_clamp = true,
+ .lower_tg4_offsets = true,
};
OPT(nir_lower_tex, &tex_options);
OPT(nir_split_var_copies);
OPT(nir_split_struct_vars, nir_var_function_temp);
- nir = brw_nir_optimize(nir, compiler, is_scalar, true);
+ brw_nir_optimize(nir, compiler, is_scalar, true);
+
+ bool lowered_64bit_ops = false;
+ do {
+ progress = false;
+
+ OPT(nir_lower_int64, nir->options->lower_int64_options);
+ OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+
+ /* Necessary to lower add -> sub and div -> mul/rcp */
+ OPT(nir_opt_algebraic);
+
+ lowered_64bit_ops |= progress;
+ } while (progress);
/* This needs to be run after the first optimization pass but before we
* lower indirect derefs away
OPT(nir_opt_large_constants, NULL, 32);
}
- OPT(nir_lower_bit_size, lower_bit_size_callback, NULL);
+ OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
if (is_scalar) {
OPT(nir_lower_load_const_to_scalar);
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
- OPT(brw_nir_lower_mem_access_bit_sizes);
+ /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
+ * SSBOs, our back-end is capable of loading an entire vec4 at a time and
+ * we would like to take advantage of that whenever possible regardless of
+ * whether or not the app gives us full loads. This should allow the
+ * optimizer to combine UBO and SSBO load operations and save us some send
+ * messages.
+ */
+ OPT(nir_lower_array_deref_of_vec,
+ nir_var_mem_ubo | nir_var_mem_ssbo,
+ nir_lower_direct_array_deref_of_vec_load);
/* Get rid of split copies */
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
-
- return nir;
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
- nir_shader **producer, nir_shader **consumer)
+ nir_shader *producer, nir_shader *consumer)
{
- nir_lower_io_arrays_to_elements(*producer, *consumer);
- nir_validate_shader(*producer, "after nir_lower_io_arrays_to_elements");
- nir_validate_shader(*consumer, "after nir_lower_io_arrays_to_elements");
+ nir_lower_io_arrays_to_elements(producer, consumer);
+ nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
+ nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
- const bool p_is_scalar =
- compiler->scalar_stage[(*producer)->info.stage];
- const bool c_is_scalar =
- compiler->scalar_stage[(*consumer)->info.stage];
+ const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
+ const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
if (p_is_scalar && c_is_scalar) {
- NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
- NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+ NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+ brw_nir_optimize(producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(consumer, compiler, c_is_scalar, false);
}
- if (nir_link_opt_varyings(*producer, *consumer))
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ if (nir_link_opt_varyings(producer, consumer))
+ brw_nir_optimize(consumer, compiler, c_is_scalar, false);
- NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
- NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+ NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
+ NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
- if (nir_remove_unused_varyings(*producer, *consumer)) {
- NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
- NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+ if (nir_remove_unused_varyings(producer, consumer)) {
+ NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+ NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
/* The backend might not be able to handle indirects on
* temporaries so we need to lower indirects on any of the
* varyings we have demoted here.
*/
- NIR_PASS_V(*producer, nir_lower_indirect_derefs,
- brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
- NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
- brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
+ NIR_PASS_V(producer, nir_lower_indirect_derefs,
+ brw_nir_no_indirect_mask(compiler, producer->info.stage));
+ NIR_PASS_V(consumer, nir_lower_indirect_derefs,
+ brw_nir_no_indirect_mask(compiler, consumer->info.stage));
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+ }
+
+ NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
+ NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
+ NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
+
+ if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
+ /* Calling lower_io_to_vector creates output variable writes with
+ * write-masks. On non-TCS outputs, the back-end can't handle it and we
+ * need to call nir_lower_io_to_temporaries to get rid of them. This,
+ * in turn, creates temporary variables and extra copy_deref intrinsics
+ * that we need to clean up.
+ */
+ NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(producer), true, false);
+ NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+ NIR_PASS_V(producer, nir_split_var_copies);
+ NIR_PASS_V(producer, nir_lower_var_copies);
}
}
* called on a shader, it will no longer be in SSA form so most optimizations
* will not work.
*/
-nir_shader *
+void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar)
{
UNUSED bool progress; /* Written by OPT */
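+ /* Lower memory access bit sizes to what the hardware supports, then
+ * lower any remaining 64-bit integer operations.
+ */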
+ OPT(brw_nir_lower_mem_access_bit_sizes);
+ OPT(nir_lower_int64, nir->options->lower_int64_options);
do {
progress = false;
OPT(nir_opt_algebraic_before_ffma);
} while (progress);
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
OPT(brw_nir_opt_peephole_ffma);
}
- OPT(nir_opt_algebraic_late);
+ if (OPT(nir_opt_comparison_pre)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+
+ /* Do the select peephole again. nir_opt_comparison_pre (combined with
+ * the other optimization passes) will have removed at least one
+ * instruction from one of the branches of the if-statement, so now it
+ * might be under the threshold of conversion to bcsel.
+ *
+ * See brw_nir_optimize for the explanation of is_vec4_tessellation.
+ */
+ const bool is_vec4_tessellation = !is_scalar &&
+ (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL);
+ OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
+ OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
+ compiler->devinfo->gen >= 6);
+ }
+
+ do {
+ progress = false;
+ if (OPT(nir_opt_algebraic_late)) {
+ /* At this late stage, anything that makes more constants will wreak
+ * havoc on the vec4 backend, whose constant handling is poor.
+ */
+ if (is_scalar) {
+ OPT(nir_opt_constant_folding);
+ OPT(nir_copy_prop);
+ }
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ }
+ } while (progress);
+
+ OPT(brw_nir_lower_conversions);
+
+ if (is_scalar)
+ OPT(nir_lower_alu_to_scalar, NULL);
OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_dce);
+ if (OPT(nir_opt_rematerialize_compares))
+ OPT(nir_opt_dce);
+
/* This is the last pass we run before we start emitting stuff. It
* determines when we need to insert boolean resolves on Gen <= 5. We
* run it last because it stashes data in instr->pass_flags and we don't
_mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
-
- return nir;
}
-nir_shader *
+void
brw_nir_apply_sampler_key(nir_shader *nir,
const struct brw_compiler *compiler,
const struct brw_sampler_prog_key_data *key_tex,
{
const struct gen_device_info *devinfo = compiler->devinfo;
nir_lower_tex_options tex_options = {
+ .lower_txd_clamp_bindless_sampler = true,
.lower_txd_clamp_if_sampler_index_not_lt_16 = true,
};
if (nir_lower_tex(nir, &tex_options)) {
nir_validate_shader(nir, "after nir_lower_tex");
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
-
- return nir;
}
enum brw_reg_type
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
return nir;
}