(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
- OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
compiler->devinfo->gen >= 6);
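/* As I understand nir_opt_peephole_select, its second argument limits how
 * many instructions a branch may contain and still be flattened into a
 * bcsel; the change above raises that limit from 1 to 8, and the trailing
 * booleans allow indirect loads and expensive ALU ops in the flattened
 * code.
 */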
OPT(nir_opt_intrinsics);
/* Work around a Gfxbench shader's unused local sampler variable, which
 * would otherwise trigger an assert in the opt_large_constants pass.
 */
- OPT(nir_remove_dead_variables, nir_var_function_temp);
+ OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}
static unsigned
}
if (nir->info.stage == MESA_SHADER_GEOMETRY)
- OPT(nir_lower_gs_intrinsics);
+ OPT(nir_lower_gs_intrinsics, false);
/* See also brw_nir_trig_workarounds.py */
if (compiler->precise_trig &&
!(devinfo->gen >= 10 || devinfo->is_kabylake))
OPT(brw_nir_apply_trig_workarounds);
+ if (devinfo->gen >= 12)
+ OPT(brw_nir_clamp_image_1d_2d_array_sizes);
+
static const nir_lower_tex_options tex_options = {
   .lower_txp = ~0,
   .lower_txf_offset = true,
};

const nir_lower_subgroups_options subgroups_options = {
   .lower_to_scalar = true,
   .lower_vote_trivial = !is_scalar,
   .lower_shuffle = true,
+ .lower_quad_broadcast_dynamic = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
OPT(nir_lower_clip_cull_distance_arrays);
+ if ((devinfo->gen >= 8 || devinfo->is_haswell) && is_scalar) {
+ /* TODO: Yes, we could in theory do this on gen6 and earlier. However,
+ * that would require plumbing through support for these indirect
+ * scratch read/write messages with message registers and that's just a
+ * pain. Also, the primary benefit of this is for compute shaders which
+ * won't run on gen6 and earlier anyway.
+ *
+ * On gen7 and earlier the scratch space size is limited to 12kB.
+ * By enabling this optimization we may easily exceed this limit without
+ * having any fallback.
+ *
+ * The threshold of 128B was chosen semi-arbitrarily. The idea is that
+ * 128B per channel on a SIMD8 program is 32 registers or 25% of the
+ * register file. Any array that large is likely to cause pressure
+ * issues. Also, this value is sufficiently high that the benchmarks
+ * known to suffer from large temporary array issues are helped but
+ * nothing else in shader-db is hurt except for maybe that one kerbal
+ * space program shader.
+ */
+ OPT(nir_lower_vars_to_scratch, nir_var_function_temp, 128,
+ glsl_get_natural_size_align_bytes);
+ }
+
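/* A self-contained sanity check of the arithmetic in the comment above
 * (an illustration, not part of the patch): 128B per channel in a SIMD8
 * program is 1024B, i.e. 32 of the 32-byte GRFs, or 25% of the
 * 128-register file.
 */
#include <assert.h>

static void
check_scratch_threshold_math(void)
{
   const unsigned threshold_b = 128; /* per-channel threshold used above */
   const unsigned simd_width = 8;    /* channels in a SIMD8 program */
   const unsigned grf_size_b = 32;   /* bytes per GRF register */
   const unsigned grf_count = 128;   /* registers in the file */

   const unsigned regs = threshold_b * simd_width / grf_size_b;
   assert(regs == 32);                   /* "32 registers" */
   assert(regs * 100 / grf_count == 25); /* "25% of the register file" */
}
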
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
if (nir_link_opt_varyings(producer, consumer))
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
- NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
- NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
+ NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
+ NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
if (nir_remove_unused_varyings(producer, consumer)) {
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
}
}
+static bool
+brw_nir_should_vectorize_mem(unsigned align, unsigned bit_size,
+ unsigned num_components, unsigned high_offset,
+ nir_intrinsic_instr *low,
+ nir_intrinsic_instr *high)
+{
+ /* Don't combine things to generate 64-bit loads/stores. We have to split
+ * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
+ * we don't want to make a mess for the back-end.
+ */
+ if (bit_size > 32)
+ return false;
+
+ /* We can handle at most a vec4 right now. Anything bigger would get
+ * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
+ */
+ if (num_components > 4)
+ return false;
+
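+ /* Reject combinations that would be under-aligned for their component
+ * size (e.g. a 32-bit access needs at least 4-byte alignment).
+ */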
+ if (align < bit_size / 8)
+ return false;
+
+ return true;
+}
+
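/* An illustration of the callback above (a sketch, not part of the
 * patch): none of its checks inspect the low/high intrinsics, so NULL
 * can stand in for them here.
 */
#include <assert.h>
#include <stddef.h>

static void
check_vectorize_callback(void)
{
   assert(brw_nir_should_vectorize_mem(4, 32, 2, 0, NULL, NULL));  /* vec2 of 32-bit, 4B aligned: OK */
   assert(!brw_nir_should_vectorize_mem(8, 64, 2, 0, NULL, NULL)); /* 64-bit: rejected */
   assert(!brw_nir_should_vectorize_mem(4, 32, 8, 0, NULL, NULL)); /* wider than vec4: rejected */
   assert(!brw_nir_should_vectorize_mem(2, 32, 2, 0, NULL, NULL)); /* under-aligned: rejected */
}
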
+static bool
+combine_all_barriers(nir_intrinsic_instr *a,
+                     nir_intrinsic_instr *b,
+                     void *data)
+{
+ /* Translation to backend IR will get rid of modes we don't care about, so
+ * no harm in always combining them.
+ *
+ * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
+ * scheduling so that it can take advantage of the different semantics.
+ */
+ nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
+ nir_intrinsic_memory_modes(b));
+ nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
+ nir_intrinsic_memory_semantics(b));
+ nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
+ nir_intrinsic_memory_scope(b)));
+ return true;
+}
+
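/* A worked example of the combine above (illustration only): given
 *    a = barrier(modes = SSBO,   semantics = ACQUIRE, scope = WORKGROUP)
 *    b = barrier(modes = SHARED, semantics = RELEASE, scope = DEVICE)
 * the callback rewrites a as
 *    barrier(modes = SSBO | SHARED, semantics = ACQUIRE | RELEASE,
 *            scope = DEVICE)
 * which is at least as strong as both inputs, so b can safely be dropped.
 */
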
+static void
+brw_vectorize_lower_mem_access(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar)
+{
+ const struct gen_device_info *devinfo = compiler->devinfo;
+ bool progress = false;
+
+ if (is_scalar) {
+ OPT(nir_opt_load_store_vectorize,
+ nir_var_mem_ubo | nir_var_mem_ssbo |
+ nir_var_mem_global | nir_var_mem_shared,
+ brw_nir_should_vectorize_mem,
+ (nir_variable_mode)0);
+ }
+
+ OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
+
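+ /* OPT() latches any progress made by the passes above into 'progress',
+ * so this cleanup loop only runs if vectorization or bit-size lowering
+ * actually changed the shader, and then iterates to a fixed point.
+ */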
+ while (progress) {
+ progress = false;
+
+ OPT(nir_lower_pack);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ OPT(nir_opt_algebraic);
+ OPT(nir_opt_constant_folding);
+ }
+}
+
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the
* actual backend.
UNUSED bool progress; /* Written by OPT */
- OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
+ OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
do {
progress = false;
brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_vectorize_lower_mem_access(nir, compiler, is_scalar);
+
if (OPT(nir_lower_int64, nir->options->lower_int64_options))
brw_nir_optimize(nir, compiler, is_scalar, false);
* havoc on the vec4 backend. The handling of constants in the vec4
* backend is not good.
*/
- if (is_scalar) {
+ if (is_scalar)
OPT(nir_opt_constant_folding);
- OPT(nir_copy_prop);
- }
+
+ OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
if (is_scalar)
OPT(nir_lower_alu_to_scalar, NULL, NULL);
- OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
+
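+ /* Each round of distributing source modifiers can expose new
+ * copy-propagation and CSE opportunities, hence the cleanup passes
+ * between iterations until the pass stops making progress.
+ */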
+ while (OPT(nir_opt_algebraic_distribute_src_mods)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ }
+
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_lower_bool_to_int32);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
OPT(nir_lower_locals_to_regs);
if (key_tex->swizzles[s] == SWIZZLE_NOOP)
continue;
- tex_options.swizzle_result |= (1 << s);
+ tex_options.swizzle_result |= BITFIELD_BIT(s);
for (unsigned c = 0; c < 4; c++)
tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
}
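/* A worked example of the swizzle lowering setup above (illustration
 * only): if key_tex->swizzles[s] were SWIZZLE_XXXX, GET_SWZ would extract
 * SWIZZLE_X for each of the four components, tex_options.swizzles[s]
 * would become {X, X, X, X}, and BITFIELD_BIT(s), Mesa's (1u << (s))
 * helper, would mark sampler s in swizzle_result so the lowering pass
 * applies the swizzle to that sampler's results.
 */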