const uint8_t *vs_attrib_wa_flags)
{
/* Start with the location of the variable's base. */
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ nir_foreach_shader_in_variable(var, nir)
var->data.driver_location = var->data.location;
- }
/* Now use nir_lower_io to walk dereference chains. Attribute arrays are
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
brw_nir_lower_vue_inputs(nir_shader *nir,
const struct brw_vue_map *vue_map)
{
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ nir_foreach_shader_in_variable(var, nir)
var->data.driver_location = var->data.location;
- }
/* Inputs are stored in vec4 slots, so use type_size_vec4(). */
nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ nir_foreach_shader_in_variable(var, nir)
var->data.driver_location = var->data.location;
- }
nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
nir_lower_io_lower_64bit_to_32);
const struct gen_device_info *devinfo,
const struct brw_wm_prog_key *key)
{
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ nir_foreach_shader_in_variable(var, nir) {
var->data.driver_location = var->data.location;
/* Apply default interpolation mode.
void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
- nir_foreach_variable(var, &nir->outputs) {
+ nir_foreach_shader_out_variable(var, nir) {
var->data.driver_location = var->data.location;
}
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
GLenum tes_primitive_mode)
{
- nir_foreach_variable(var, &nir->outputs) {
+ nir_foreach_shader_out_variable(var, nir) {
var->data.driver_location = var->data.location;
}
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
- nir_foreach_variable(var, &nir->outputs) {
+ nir_foreach_shader_out_variable(var, nir) {
var->data.driver_location =
SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
gl_shader_stage stage)
{
+ const struct gen_device_info *devinfo = compiler->devinfo;
+ const bool is_scalar = compiler->scalar_stage[stage];
nir_variable_mode indirect_mask = 0;
- if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_FRAGMENT:
indirect_mask |= nir_var_shader_in;
- if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
+ break;
+
+ case MESA_SHADER_GEOMETRY:
+ if (!is_scalar)
+ indirect_mask |= nir_var_shader_in;
+ break;
+
+ default:
+ /* Everything else can handle indirect inputs */
+ break;
+ }
+
+ if (is_scalar && stage != MESA_SHADER_TESS_CTRL)
indirect_mask |= nir_var_shader_out;
- if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
+
+ /* On HSW+, we allow indirects in scalar shaders. They get implemented
+ * using nir_lower_vars_to_explicit_types and nir_lower_explicit_io in
+ * brw_postprocess_nir.
+ *
+ * We haven't plumbed through the indirect scratch messages on gen6 or
+ * earlier, so doing indirects via scratch doesn't work there. On gen7 and
+ * earlier the scratch space size is limited to 12kB. If we allowed
+ * indirects as scratch all the time, we could easily exceed this limit
+ * without having any fallback.
+ */
+ if (is_scalar && devinfo->gen <= 7 && !devinfo->is_haswell)
indirect_mask |= nir_var_function_temp;
return indirect_mask;
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar, bool allow_copies)
{
- nir_variable_mode indirect_mask =
+ nir_variable_mode loop_indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
+ /* We can handle indirects via scratch messages. However, they are
+ * expensive, so we'd rather avoid them when we can. Have loop unrolling
+ * try to get rid of them.
+ */
+ if (is_scalar)
+ loop_indirect_mask |= nir_var_function_temp;
+
bool progress;
unsigned lower_flrp =
(nir->options->lower_flrp16 ? 16 : 0) |
OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
+ } else {
+ OPT(nir_opt_shrink_vectors);
}
OPT(nir_copy_prop);
(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
- OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
compiler->devinfo->gen >= 6);
OPT(nir_opt_intrinsics);
OPT(nir_opt_if, false);
OPT(nir_opt_conditional_discard);
if (nir->options->max_unroll_iterations != 0) {
- OPT(nir_opt_loop_unroll, indirect_mask);
+ OPT(nir_opt_loop_unroll, loop_indirect_mask);
}
OPT(nir_opt_remove_phis);
OPT(nir_opt_undef);
/* Work around a Gfxbench unused local sampler variable which will trigger an
* assert in the opt_large_constants pass.
*/
- OPT(nir_remove_dead_variables, nir_var_function_temp);
+ OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}
static unsigned
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
}
if (nir->info.stage == MESA_SHADER_GEOMETRY)
- OPT(nir_lower_gs_intrinsics);
+ OPT(nir_lower_gs_intrinsics, false);
/* See also brw_nir_trig_workarounds.py */
if (compiler->precise_trig &&
!(devinfo->gen >= 10 || devinfo->is_kabylake))
OPT(brw_nir_apply_trig_workarounds);
+ if (devinfo->gen >= 12)
+ OPT(brw_nir_clamp_image_1d_2d_array_sizes);
+
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
.lower_txf_offset = true,
brw_nir_optimize(nir, compiler, is_scalar, true);
OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
- OPT(nir_lower_int64, nir->options->lower_int64_options);
-
- /* This needs to be run after the first optimization pass but before we
- * lower indirect derefs away
- */
- if (compiler->supports_shader_constants) {
- OPT(nir_opt_large_constants, NULL, 32);
- }
+ OPT(nir_lower_int64);
OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
/* Lower a bunch of stuff */
OPT(nir_lower_var_copies);
+ /* This needs to be run after the first optimization pass but before we
+ * lower indirect derefs away
+ */
+ if (compiler->supports_shader_constants) {
+ OPT(nir_opt_large_constants, NULL, 32);
+ }
+
OPT(nir_lower_system_values);
+ OPT(nir_lower_compute_system_values, NULL);
const nir_lower_subgroups_options subgroups_options = {
.ballot_bit_size = 32,
.lower_to_scalar = true,
.lower_vote_trivial = !is_scalar,
.lower_shuffle = true,
+ .lower_quad_broadcast_dynamic = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
- OPT(nir_lower_indirect_derefs, indirect_mask);
+ OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
+
+ /* Even in cases where we can handle indirect temporaries via scratch, it
+ * can still be expensive. Lower indirects on small arrays to
+ * conditional load/stores.
+ *
+ * The threshold of 16 was chosen semi-arbitrarily. The idea is that an
+ * indirect on an array of 16 elements is about 30 instructions, at which
+ * point you may be better off doing a send. With a SIMD8 program, 16
+ * floats is 1/8 of the entire register file. Any array larger than that
+ * is likely to cause pressure issues. Also, this value is sufficiently
+ * high that the benchmarks known to suffer from large temporary array
+ * issues are helped, but nothing else in shader-db is hurt except for maybe
+ * that one Kerbal Space Program shader.
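+ *
+ * (Back-of-envelope for the register-file claim, assuming the usual
+ * 128 GRFs of 32 bytes each, i.e. 4kB of registers per thread:
+ * 16 floats x 4 bytes x 8 SIMD8 channels = 512 bytes = 4kB / 8.)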
+ */
+ if (is_scalar && !(indirect_mask & nir_var_function_temp))
+ OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
/* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
* SSBOs, our back-end is capable of loading an entire vec4 at a time and
if (nir_link_opt_varyings(producer, consumer))
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
- NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
- NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
+ NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
+ NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
if (nir_remove_unused_varyings(producer, consumer)) {
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
* varyings we have demoted here.
*/
NIR_PASS_V(producer, nir_lower_indirect_derefs,
- brw_nir_no_indirect_mask(compiler, producer->info.stage));
+ brw_nir_no_indirect_mask(compiler, producer->info.stage),
+ UINT32_MAX);
NIR_PASS_V(consumer, nir_lower_indirect_derefs,
- brw_nir_no_indirect_mask(compiler, consumer->info.stage));
+ brw_nir_no_indirect_mask(compiler, consumer->info.stage),
+ UINT32_MAX);
brw_nir_optimize(producer, compiler, p_is_scalar, false);
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
}
}
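+/* Callback for nir_opt_load_store_vectorize() (see
+ * brw_vectorize_lower_mem_access below). Returning true lets the pass fuse
+ * the two adjacent accesses "low" and "high" into a single load/store of
+ * num_components x bit_size bits with the given alignment.
+ */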
+static bool
+brw_nir_should_vectorize_mem(unsigned align, unsigned bit_size,
+ unsigned num_components, unsigned high_offset,
+ nir_intrinsic_instr *low,
+ nir_intrinsic_instr *high)
+{
+ /* Don't combine things to generate 64-bit loads/stores. We have to split
+ * those back into 32-bit ones anyway, and UBO loads aren't split in NIR, so
+ * we don't want to make a mess for the back-end.
+ */
+ if (bit_size > 32)
+ return false;
+
+ /* We can handle at most a vec4 right now. Anything bigger would get
+ * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
+ */
+ if (num_components > 4)
+ return false;
+
+ if (align < bit_size / 8)
+ return false;
+
+ return true;
+}
+
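+/* Callback for nir_opt_combine_memory_barriers() (used from
+ * brw_postprocess_nir below): merge barrier "b" into "a" by unioning the
+ * modes and semantics and taking the wider scope, and return true so the
+ * pass can drop the second barrier.
+ */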
+static bool
+combine_all_barriers(nir_intrinsic_instr *a,
+ nir_intrinsic_instr *b,
+ void *data)
+{
+ /* Translation to backend IR will get rid of modes we don't care about, so
+ * no harm in always combining them.
+ *
+ * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
+ * scheduling so that it can take advantage of the different semantics.
+ */
+ nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
+ nir_intrinsic_memory_modes(b));
+ nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
+ nir_intrinsic_memory_semantics(b));
+ nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
+ nir_intrinsic_memory_scope(b)));
+ return true;
+}
+
+static void
+brw_vectorize_lower_mem_access(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar)
+{
+ const struct gen_device_info *devinfo = compiler->devinfo;
+ bool progress = false;
+
+ if (is_scalar) {
+ OPT(nir_opt_load_store_vectorize,
+ nir_var_mem_ubo | nir_var_mem_ssbo |
+ nir_var_mem_global | nir_var_mem_shared,
+ brw_nir_should_vectorize_mem,
+ (nir_variable_mode)0);
+ }
+
+ OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
+
+ while (progress) {
+ progress = false;
+
+ OPT(nir_lower_pack);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ OPT(nir_opt_algebraic);
+ OPT(nir_opt_constant_folding);
+ }
+}
+
+static bool
+nir_shader_has_local_variables(const nir_shader *nir)
+{
+ nir_foreach_function(func, nir) {
+ if (func->impl && !exec_list_is_empty(&func->impl->locals))
+ return true;
+ }
+
+ return false;
+}
+
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the actual
UNUSED bool progress; /* Written by OPT */
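+   /* OPT() is this file's pass-runner convenience macro: roughly, it invokes
+    * NIR_PASS on "nir", ORs the per-pass result into "progress" above, and
+    * evaluates to whether that individual pass made progress.
+    */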
- OPT(brw_nir_lower_mem_access_bit_sizes);
+ OPT(brw_nir_lower_scoped_barriers);
+ OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
do {
progress = false;
brw_nir_optimize(nir, compiler, is_scalar, false);
- if (OPT(nir_lower_int64, nir->options->lower_int64_options))
+ if (is_scalar && nir_shader_has_local_variables(nir)) {
+ OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
+ glsl_get_natural_size_align_bytes);
+ OPT(nir_lower_explicit_io, nir_var_function_temp,
+ nir_address_format_32bit_offset);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
+ }
+
+ brw_vectorize_lower_mem_access(nir, compiler, is_scalar);
+
+ if (OPT(nir_lower_int64))
brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
* havoc on the vec4 backend. The handling of constants in the vec4
* backend is not good.
*/
- if (is_scalar) {
+ if (is_scalar)
OPT(nir_opt_constant_folding);
- OPT(nir_copy_prop);
- }
+
+ OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
OPT(brw_nir_lower_conversions);
if (is_scalar)
- OPT(nir_lower_alu_to_scalar, NULL);
- OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
+
+ while (OPT(nir_opt_algebraic_distribute_src_mods)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ }
+
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_lower_bool_to_int32);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
OPT(nir_lower_locals_to_regs);
if (key_tex->swizzles[s] == SWIZZLE_NOOP)
continue;
- tex_options.swizzle_result |= (1 << s);
+ tex_options.swizzle_result |= BITFIELD_BIT(s);
for (unsigned c = 0; c < 4; c++)
tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
}
tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
+ tex_options.bt709_external = key_tex->bt709_mask;
+ tex_options.bt2020_external = key_tex->bt2020_mask;
/* Setup array of scaling factors for each texture. */
memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
case nir_op_b32all_iequal4:
return BRW_CONDITIONAL_Z;
- case nir_op_fne:
- case nir_op_fne32:
+ case nir_op_fneu:
+ case nir_op_fneu32:
case nir_op_ine:
case nir_op_ine32:
case nir_op_b32any_fnequal2:
}
}
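+/* Translate a NIR atomic intrinsic into the matching BRW_AOP_* opcode.
+ * Atomic adds of constant +1 or -1 are strength-reduced to
+ * BRW_AOP_INC / BRW_AOP_DEC.
+ */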
+uint32_t
+brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
+{
+ switch (atomic->intrinsic) {
+#define AOP_CASE(atom) \
+ case nir_intrinsic_image_atomic_##atom: \
+ case nir_intrinsic_bindless_image_atomic_##atom: \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
+ AOP_CASE(add): {
+ unsigned src_idx;
+ switch (atomic->intrinsic) {
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_add:
+ src_idx = 3;
+ break;
+ case nir_intrinsic_ssbo_atomic_add:
+ src_idx = 2;
+ break;
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_global_atomic_add:
+ src_idx = 1;
+ break;
+ default:
+ unreachable("Invalid add atomic opcode");
+ }
+
+ if (nir_src_is_const(atomic->src[src_idx])) {
+ int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
+ if (add_val == 1)
+ return BRW_AOP_INC;
+ else if (add_val == -1)
+ return BRW_AOP_DEC;
+ }
+ return BRW_AOP_ADD;
+ }
+
+ AOP_CASE(imin): return BRW_AOP_IMIN;
+ AOP_CASE(umin): return BRW_AOP_UMIN;
+ AOP_CASE(imax): return BRW_AOP_IMAX;
+ AOP_CASE(umax): return BRW_AOP_UMAX;
+ AOP_CASE(and): return BRW_AOP_AND;
+ AOP_CASE(or): return BRW_AOP_OR;
+ AOP_CASE(xor): return BRW_AOP_XOR;
+ AOP_CASE(exchange): return BRW_AOP_MOV;
+ AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
+
+#undef AOP_CASE
+#define AOP_CASE(atom) \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
+ AOP_CASE(fmin): return BRW_AOP_FMIN;
+ AOP_CASE(fmax): return BRW_AOP_FMAX;
+ AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
+
+#undef AOP_CASE
+
+ default:
+ unreachable("Unsupported NIR atomic intrinsic");
+ }
+}
+
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{