OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
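+ /* nir_lower_alu_to_scalar now takes a filter callback and a user-data
+  * pointer so callers can keep selected ALU ops vectorized; passing
+  * NULL, NULL scalarizes everything, matching the old behavior.
+  */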
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
}
OPT(nir_copy_prop);
bool is_vec4_tessellation = !is_scalar &&
(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
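+ /* nir_opt_peephole_select(shader, limit, indirect_load_ok,
+  * expensive_alu_ok): the first call flattens only moves (limit 0); the
+  * second now allows up to 8 instructions per branch (previously 1) and
+  * permits expensive ALU ops on gen6 and later.
+  */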
OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
- OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
compiler->devinfo->gen >= 6);
OPT(nir_opt_intrinsics);
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
}
if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);

OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
OPT(nir_lower_int64, nir->options->lower_int64_options);
- /* This needs to be run after the first optimization pass but before we
- * lower indirect derefs away
- */
- if (compiler->supports_shader_constants) {
- OPT(nir_opt_large_constants, NULL, 32);
- }
-
OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
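+ /* lower_bit_size_callback returns, per instruction, the bit size it
+  * should be widened to (0 means leave it alone); the compiler pointer
+  * gives the callback access to the device info.
+  */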
if (is_scalar) {
OPT(nir_lower_load_const_to_scalar);
}

/* Lower a bunch of stuff */
OPT(nir_lower_var_copies);
+ /* This needs to be run after the first optimization pass but before we
+ * lower indirect derefs away
+ */
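+ /* nir_opt_large_constants promotes constant-initialized locals larger
+  * than the 32-byte threshold into the shader's constant-data buffer and
+  * rewrites their loads as load_constant intrinsics; the NULL argument
+  * picks the pass's default size/align callback.
+  */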
+ if (compiler->supports_shader_constants) {
+ OPT(nir_opt_large_constants, NULL, 32);
+ }
+
OPT(nir_lower_system_values);
const nir_lower_subgroups_options subgroups_options = {
.ballot_bit_size = 32,
.lower_to_scalar = true,
.lower_vote_trivial = !is_scalar,
.lower_shuffle = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
OPT(nir_lower_clip_cull_distance_arrays);
+ if ((devinfo->gen >= 8 || devinfo->is_haswell) && is_scalar) {
+ /* TODO: Yes, we could in theory do this on gen6 and earlier. However,
+ * that would require plumbing through support for these indirect
+ * scratch read/write messages with message registers and that's just a
+ * pain. Also, the primary benefit of this is for compute shaders which
+ * won't run on gen6 and earlier anyway.
+ *
+ * On gen7 and earlier the scratch space size is limited to 12kB.
+ * By enabling this optimization we may easily exceed this limit without
+ * having any fallback.
+ *
+ * The threshold of 128B was chosen semi-arbitrarily. The idea is that
+ * 128B per channel on a SIMD8 program is 32 registers or 25% of the
+ * register file. Any array that large is likely to cause pressure
+ * issues. Also, this value is sufficiently high that the benchmarks
+ * known to suffer from large temporary array issues are helped but
+ * nothing else in shader-db is hurt except for maybe that one Kerbal
+ * Space Program shader.
+ */
+ OPT(nir_lower_vars_to_scratch, nir_var_function_temp, 128,
+ glsl_get_natural_size_align_bytes);
+ }
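+ /* The arithmetic above: 128B per channel times 8 channels in SIMD8 is
+  * 1024B, i.e. 32 GRFs of 32B each, which is 25% of the 128-GRF register
+  * file. nir_lower_vars_to_scratch rewrites qualifying
+  * nir_var_function_temp variables into scratch load/store intrinsics.
+  */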
+
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
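+ /* brw_nir_no_indirect_mask returns the variable modes this stage cannot
+  * index indirectly; nir_lower_indirect_derefs turns indirect access to
+  * those modes into if-ladders over constant indices.
+  */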
UNUSED bool progress; /* Written by OPT */
- OPT(brw_nir_lower_mem_access_bit_sizes);
+ OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
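+ /* The devinfo parameter is new here; presumably it lets the pass make
+  * generation-specific choices about which memory access sizes need to be
+  * split or widened.
+  */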
do {
progress = false;
OPT(brw_nir_lower_conversions);
if (is_scalar)
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
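+ /* nir_opt_move_comparisons was generalized into nir_opt_move, which
+  * takes a nir_move_options bitmask; nir_move_comparisons requests the
+  * same sinking of comparisons toward their uses.
+  */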
- OPT(nir_opt_move_comparisons);
+ OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_lower_bool_to_int32);
} while (progress);
}
+uint32_t
+brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
+{
+ switch (atomic->intrinsic) {
+#define AOP_CASE(atom) \
+ case nir_intrinsic_image_atomic_##atom: \
+ case nir_intrinsic_bindless_image_atomic_##atom: \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
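+ /* Each AOP_CASE(atom) expands to a run of case labels covering every
+  * intrinsic variant of that atomic; the final label deliberately omits
+  * its colon so each use below reads like an ordinary case label.
+  */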
+ AOP_CASE(add): {
+ unsigned src_idx;
+ switch (atomic->intrinsic) {
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_add:
+ src_idx = 3;
+ break;
+ case nir_intrinsic_ssbo_atomic_add:
+ src_idx = 2;
+ break;
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_global_atomic_add:
+ src_idx = 1;
+ break;
+ default:
+ unreachable("Invalid add atomic opcode");
+ }
+
+ if (nir_src_is_const(atomic->src[src_idx])) {
+ int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
+ if (add_val == 1)
+ return BRW_AOP_INC;
+ else if (add_val == -1)
+ return BRW_AOP_DEC;
+ }
+ return BRW_AOP_ADD;
+ }
+
+ AOP_CASE(imin): return BRW_AOP_IMIN;
+ AOP_CASE(umin): return BRW_AOP_UMIN;
+ AOP_CASE(imax): return BRW_AOP_IMAX;
+ AOP_CASE(umax): return BRW_AOP_UMAX;
+ AOP_CASE(and): return BRW_AOP_AND;
+ AOP_CASE(or): return BRW_AOP_OR;
+ AOP_CASE(xor): return BRW_AOP_XOR;
+ AOP_CASE(exchange): return BRW_AOP_MOV;
+ AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
+
+#undef AOP_CASE
+#define AOP_CASE(atom) \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
+ AOP_CASE(fmin): return BRW_AOP_FMIN;
+ AOP_CASE(fmax): return BRW_AOP_FMAX;
+ AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
+
+#undef AOP_CASE
+
+ default:
+ unreachable("Unsupported NIR atomic intrinsic");
+ }
+}
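+
+/* Usage sketch (hypothetical caller, not part of this patch):
+ *
+ *    uint32_t aop = brw_aop_for_nir_intrinsic(instr);
+ *
+ * e.g. an ssbo_atomic_add whose data source is the constant 1 yields
+ * BRW_AOP_INC, constant -1 yields BRW_AOP_DEC, and any other value
+ * yields BRW_AOP_ADD.
+ */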
+
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{