#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
-static bool
-is_input(nir_intrinsic_instr *intrin)
-{
- return intrin->intrinsic == nir_intrinsic_load_input ||
- intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
- intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
- intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
-}
-
-static bool
-is_output(nir_intrinsic_instr *intrin)
-{
- return intrin->intrinsic == nir_intrinsic_load_output ||
- intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
- intrin->intrinsic == nir_intrinsic_store_output ||
- intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
-}
-
-/**
- * In many cases, we just add the base and offset together, so there's no
- * reason to keep them separate. Sometimes, combining them is essential:
- * if a shader only accesses part of a compound variable (such as a matrix
- * or array), the variable's base may not actually exist in the VUE map.
- *
- * This pass adds constant offsets to instr->const_index[0], and resets
- * the offset source to 0. Non-constant offsets remain unchanged - since
- * we don't know what part of a compound variable is accessed, we allocate
- * storage for the entire thing.
- */
-
-static bool
-add_const_offset_to_base_block(nir_block *block, nir_builder *b,
- nir_variable_mode mode)
-{
- nir_foreach_instr_safe(instr, block) {
- if (instr->type != nir_instr_type_intrinsic)
- continue;
-
- nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
- if ((mode == nir_var_shader_in && is_input(intrin)) ||
- (mode == nir_var_shader_out && is_output(intrin))) {
- nir_src *offset = nir_get_io_offset_src(intrin);
-
- if (nir_src_is_const(*offset)) {
- intrin->const_index[0] += nir_src_as_uint(*offset);
- b->cursor = nir_before_instr(&intrin->instr);
- nir_instr_rewrite_src(&intrin->instr, offset,
- nir_src_for_ssa(nir_imm_int(b, 0)));
- }
- }
- }
- return true;
-}
-
-static void
-add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
-{
- nir_foreach_function(f, nir) {
- if (f->impl) {
- nir_builder b;
- nir_builder_init(&b, f->impl);
- nir_foreach_block(block, f->impl) {
- add_const_offset_to_base_block(block, &b, mode);
- }
- }
- }
-}
-
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
GLenum primitive_mode)
return true;
}
+static bool
+is_input(nir_intrinsic_instr *intrin)
+{
+ return intrin->intrinsic == nir_intrinsic_load_input ||
+ intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
+ intrin->intrinsic == nir_intrinsic_load_interpolated_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+ return intrin->intrinsic == nir_intrinsic_load_output ||
+ intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+ intrin->intrinsic == nir_intrinsic_store_output ||
+ intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
+
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map,
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
* whether it is a double-precision type or not.
*/
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
}
/* Inputs are stored in vec4 slots, so use type_size_vec4(). */
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (!function->impl)
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function(function, nir) {
if (function->impl) {
}
}
- nir_lower_io_options lower_io_options = 0;
+ nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
if (key->persample_interp)
lower_io_options |= nir_lower_io_force_sample_interpolation;
nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
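+ /* Gen11 removed the PLN instruction, so interpolation must be lowered to
+ * explicit per-slot delta loads and ffma math the back-end can emit.
+ */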
+ if (devinfo->gen >= 11)
+ nir_lower_interpolation(nir, ~0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}
void
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
}
void
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_out);
+ nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
nir_foreach_function(function, nir) {
if (function->impl) {
OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
}
OPT(nir_copy_prop);
(nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL);
OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
- OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
compiler->devinfo->gen >= 6);
OPT(nir_opt_intrinsics);
OPT(nir_opt_constant_folding);
if (lower_flrp != 0) {
- /* To match the old behavior, set always_precise only for scalar
- * shader stages.
- */
if (OPT(nir_lower_flrp,
lower_flrp,
false /* always_precise */,
OPT(nir_opt_dce);
}
OPT(nir_opt_if, false);
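+ /* Fold an if block whose only side effect is a discard into a single
+ * conditional discard intrinsic.
+ */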
+ OPT(nir_opt_conditional_discard);
if (nir->options->max_unroll_iterations != 0) {
OPT(nir_opt_loop_unroll, indirect_mask);
}
/* Workaround Gfxbench unused local sampler variable which will trigger an
* assert in the opt_large_constants pass.
*/
- OPT(nir_remove_dead_variables, nir_var_function_temp);
+ OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}
static unsigned
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
if (is_scalar) {
- OPT(nir_lower_alu_to_scalar, NULL);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
}
if (nir->info.stage == MESA_SHADER_GEOMETRY)
- OPT(nir_lower_gs_intrinsics);
+ OPT(nir_lower_gs_intrinsics, false);
/* See also brw_nir_trig_workarounds.py */
if (compiler->precise_trig &&
!(devinfo->gen >= 10 || devinfo->is_kabylake))
OPT(brw_nir_apply_trig_workarounds);
+ if (devinfo->gen >= 12)
+ OPT(brw_nir_clamp_image_1d_2d_array_sizes);
+
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
.lower_txf_offset = true,
brw_nir_optimize(nir, compiler, is_scalar, true);
- bool lowered_64bit_ops = false;
- do {
- progress = false;
+ OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+ OPT(nir_lower_int64, nir->options->lower_int64_options);
- OPT(nir_lower_int64, nir->options->lower_int64_options);
- OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+ OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
- /* Necessary to lower add -> sub and div -> mul/rcp */
- OPT(nir_opt_algebraic);
+ if (is_scalar) {
+ OPT(nir_lower_load_const_to_scalar);
+ }
- lowered_64bit_ops |= progress;
- } while (progress);
+ /* Lower a bunch of stuff */
+ OPT(nir_lower_var_copies);
/* This needs to be run after the first optimization pass but before we
* lower indirect derefs away
OPT(nir_opt_large_constants, NULL, 32);
}
- OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
-
- if (is_scalar) {
- OPT(nir_lower_load_const_to_scalar);
- }
-
- /* Lower a bunch of stuff */
- OPT(nir_lower_var_copies);
-
OPT(nir_lower_system_values);
const nir_lower_subgroups_options subgroups_options = {
- .subgroup_size = BRW_SUBGROUP_SIZE,
.ballot_bit_size = 32,
.lower_to_scalar = true,
- .lower_subgroup_masks = true,
.lower_vote_trivial = !is_scalar,
.lower_shuffle = true,
+ .lower_quad_broadcast_dynamic = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
OPT(nir_lower_clip_cull_distance_arrays);
+ if ((devinfo->gen >= 8 || devinfo->is_haswell) && is_scalar) {
+ /* TODO: Yes, we could in theory do this on gen6 and earlier. However,
+ * that would require plumbing through support for these indirect
+ * scratch read/write messages with message registers and that's just a
+ * pain. Also, the primary benefit of this is for compute shaders which
+ * won't run on gen6 and earlier anyway.
+ *
+ * On gen7 and earlier the scratch space size is limited to 12kB.
+ * By enabling this optimization we may easily exceed this limit without
+ * having any fallback.
+ *
+ * The threshold of 128B was chosen semi-arbitrarily. The idea is that
+ * 128B per channel on a SIMD8 program is 32 registers or 25% of the
+ * register file. Any array that large is likely to cause pressure
+ * issues. Also, this value is sufficiently high that the benchmarks
+ * known to suffer from large temporary array issues are helped but
+ * nothing else in shader-db is hurt except for maybe that one Kerbal
+ * Space Program shader.
+ */
+ OPT(nir_lower_vars_to_scratch, nir_var_function_temp, 128,
+ glsl_get_natural_size_align_bytes);
+ }
+
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask);
if (nir_link_opt_varyings(producer, consumer))
brw_nir_optimize(consumer, compiler, c_is_scalar, false);
- NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
- NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
+ NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
+ NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
if (nir_remove_unused_varyings(producer, consumer)) {
NIR_PASS_V(producer, nir_lower_global_vars_to_local);
}
}
+static bool
+brw_nir_should_vectorize_mem(unsigned align, unsigned bit_size,
+ unsigned num_components, unsigned high_offset,
+ nir_intrinsic_instr *low,
+ nir_intrinsic_instr *high)
+{
+ /* Don't combine things to generate 64-bit loads/stores. We have to split
+ * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
+ * we don't want to make a mess for the back-end.
+ */
+ if (bit_size > 32)
+ return false;
+
+ /* We can handle at most a vec4 right now. Anything bigger would get
+ * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
+ */
+ if (num_components > 4)
+ return false;
+
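+ /* Require the combined access to be at least naturally aligned for its
+ * element size.
+ */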
+ if (align < bit_size / 8)
+ return false;
+
+ return true;
+}
+
+static bool
+combine_all_barriers(nir_intrinsic_instr *a,
+ nir_intrinsic_instr *b,
+ void *data)
+{
+ /* Translation to backend IR will get rid of modes we don't care about, so
+ * no harm in always combining them.
+ *
+ * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
+ * scheduling so that it can take advantage of the different semantics.
+ */
+ nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
+ nir_intrinsic_memory_modes(b));
+ nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
+ nir_intrinsic_memory_semantics(b));
+ nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
+ nir_intrinsic_memory_scope(b)));
+ return true;
+}
+
+static void
+brw_vectorize_lower_mem_access(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar)
+{
+ const struct gen_device_info *devinfo = compiler->devinfo;
+ bool progress = false;
+
+ if (is_scalar) {
+ OPT(nir_opt_load_store_vectorize,
+ nir_var_mem_ubo | nir_var_mem_ssbo |
+ nir_var_mem_global | nir_var_mem_shared,
+ brw_nir_should_vectorize_mem,
+ (nir_variable_mode)0);
+ }
+
+ OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
+
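+ /* OPT() sets progress whenever a pass changes anything, so if either pass
+ * above made progress, run cleanup until we reach a fixed point.
+ */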
+ while (progress) {
+ progress = false;
+
+ OPT(nir_lower_pack);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ OPT(nir_opt_algebraic);
+ OPT(nir_opt_constant_folding);
+ }
+}
+
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the actual
UNUSED bool progress; /* Written by OPT */
- OPT(brw_nir_lower_mem_access_bit_sizes);
- OPT(nir_lower_int64, nir->options->lower_int64_options);
+ OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
do {
progress = false;
brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_vectorize_lower_mem_access(nir, compiler, is_scalar);
+
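+ /* Int64 lowering generates a lot of new ALU code, so re-run the optimizer
+ * whenever it makes progress.
+ */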
+ if (OPT(nir_lower_int64, nir->options->lower_int64_options))
+ brw_nir_optimize(nir, compiler, is_scalar, false);
+
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
OPT(brw_nir_opt_peephole_ffma);
* havoc on the vec4 backend. The handling of constants in the vec4
* backend is not good.
*/
- if (is_scalar) {
+ if (is_scalar)
OPT(nir_opt_constant_folding);
- OPT(nir_copy_prop);
- }
+
+ OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
OPT(brw_nir_lower_conversions);
if (is_scalar)
- OPT(nir_lower_alu_to_scalar, NULL);
- OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
+ OPT(nir_lower_alu_to_scalar, NULL, NULL);
+
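+ /* Distributing source modifiers exposes new copy propagation and CSE
+ * opportunities, so clean up between iterations.
+ */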
+ while (OPT(nir_opt_algebraic_distribute_src_mods)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+ }
+
OPT(nir_copy_prop);
OPT(nir_opt_dce);
- OPT(nir_opt_move_comparisons);
+ OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_lower_bool_to_int32);
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
OPT(nir_lower_locals_to_regs);
}
}
-void
+static bool
brw_nir_apply_sampler_key(nir_shader *nir,
const struct brw_compiler *compiler,
- const struct brw_sampler_prog_key_data *key_tex,
- bool is_scalar)
+ const struct brw_sampler_prog_key_data *key_tex)
{
const struct gen_device_info *devinfo = compiler->devinfo;
nir_lower_tex_options tex_options = {
if (key_tex->swizzles[s] == SWIZZLE_NOOP)
continue;
- tex_options.swizzle_result |= (1 << s);
+ tex_options.swizzle_result |= BITFIELD_BIT(s);
for (unsigned c = 0; c < 4; c++)
tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
}
memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
sizeof(tex_options.scale_factors));
- if (nir_lower_tex(nir, &tex_options)) {
- nir_validate_shader(nir, "after nir_lower_tex");
+ return nir_lower_tex(nir, &tex_options);
+}
+
+static unsigned
+get_subgroup_size(gl_shader_stage stage,
+ const struct brw_base_prog_key *key,
+ unsigned max_subgroup_size)
+{
+ switch (key->subgroup_size_type) {
+ case BRW_SUBGROUP_SIZE_API_CONSTANT:
+ /* We have to use the global constant size. */
+ return BRW_SUBGROUP_SIZE;
+
+ case BRW_SUBGROUP_SIZE_UNIFORM:
+ /* It has to be uniform across all invocations but can vary per stage
+ * if we want. This gives us a bit more freedom.
+ *
+ * For compute, brw_nir_apply_key is called per-dispatch-width so this
+ * is the actual subgroup size and not a maximum. However, we only
+ * invoke one size of any given compute shader so it's still guaranteed
+ * to be uniform across invocations.
+ */
+ return max_subgroup_size;
+
+ case BRW_SUBGROUP_SIZE_VARYING:
+ /* The subgroup size is allowed to be fully varying. For geometry
+ * stages, we know it's always 8 which is max_subgroup_size so we can
+ * return that. For compute, brw_nir_apply_key is called once per
+ * dispatch-width so max_subgroup_size is the real subgroup size.
+ *
+ * For fragment, we return 0 and let it fall through to the back-end
+ * compiler. This means we can't optimize based on subgroup size but
+ * that's a risk the client took when it asked for a varying subgroup
+ * size.
+ */
+ return stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
+
+ case BRW_SUBGROUP_SIZE_REQUIRE_8:
+ case BRW_SUBGROUP_SIZE_REQUIRE_16:
+ case BRW_SUBGROUP_SIZE_REQUIRE_32:
+ assert(stage == MESA_SHADER_COMPUTE);
+ /* These enum values are expressly chosen to be equal to the subgroup
+ * size that they require.
+ */
+ return key->subgroup_size_type;
+ }
+
+ unreachable("Invalid subgroup size type");
+}
+
+void
+brw_nir_apply_key(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ const struct brw_base_prog_key *key,
+ unsigned max_subgroup_size,
+ bool is_scalar)
+{
+ bool progress = false;
+
+ OPT(brw_nir_apply_sampler_key, compiler, &key->tex);
+
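+ /* Subgroup mask lowering depends on the subgroup size, which is only
+ * known once we have the key.
+ */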
+ const nir_lower_subgroups_options subgroups_options = {
+ .subgroup_size = get_subgroup_size(nir->info.stage, key,
+ max_subgroup_size),
+ .ballot_bit_size = 32,
+ .lower_subgroup_masks = true,
+ };
+ OPT(nir_lower_subgroups, &subgroups_options);
+
+ if (progress)
brw_nir_optimize(nir, compiler, is_scalar, false);
+}
+
+enum brw_conditional_mod
+brw_cmod_for_nir_comparison(nir_op op)
+{
+ switch (op) {
+ case nir_op_flt:
+ case nir_op_flt32:
+ case nir_op_ilt:
+ case nir_op_ilt32:
+ case nir_op_ult:
+ case nir_op_ult32:
+ return BRW_CONDITIONAL_L;
+
+ case nir_op_fge:
+ case nir_op_fge32:
+ case nir_op_ige:
+ case nir_op_ige32:
+ case nir_op_uge:
+ case nir_op_uge32:
+ return BRW_CONDITIONAL_GE;
+
+ case nir_op_feq:
+ case nir_op_feq32:
+ case nir_op_ieq:
+ case nir_op_ieq32:
+ case nir_op_b32all_fequal2:
+ case nir_op_b32all_iequal2:
+ case nir_op_b32all_fequal3:
+ case nir_op_b32all_iequal3:
+ case nir_op_b32all_fequal4:
+ case nir_op_b32all_iequal4:
+ return BRW_CONDITIONAL_Z;
+
+ case nir_op_fne:
+ case nir_op_fne32:
+ case nir_op_ine:
+ case nir_op_ine32:
+ case nir_op_b32any_fnequal2:
+ case nir_op_b32any_inequal2:
+ case nir_op_b32any_fnequal3:
+ case nir_op_b32any_inequal3:
+ case nir_op_b32any_fnequal4:
+ case nir_op_b32any_inequal4:
+ return BRW_CONDITIONAL_NZ;
+
+ default:
+ unreachable("Unsupported NIR comparison op");
+ }
+}
+
+uint32_t
+brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
+{
+ switch (atomic->intrinsic) {
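+/* Expands to a case label for each storage class that supports the atomic:
+ * image, bindless image, SSBO, shared, and global memory.
+ */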
+#define AOP_CASE(atom) \
+ case nir_intrinsic_image_atomic_##atom: \
+ case nir_intrinsic_bindless_image_atomic_##atom: \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
+ AOP_CASE(add): {
+ unsigned src_idx;
+ switch (atomic->intrinsic) {
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_add:
+ src_idx = 3;
+ break;
+ case nir_intrinsic_ssbo_atomic_add:
+ src_idx = 2;
+ break;
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_global_atomic_add:
+ src_idx = 1;
+ break;
+ default:
+ unreachable("Invalid add atomic opcode");
+ }
+
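+ /* An atomic add of constant +/-1 maps to the dedicated increment or
+ * decrement operation.
+ */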
+ if (nir_src_is_const(atomic->src[src_idx])) {
+ int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
+ if (add_val == 1)
+ return BRW_AOP_INC;
+ else if (add_val == -1)
+ return BRW_AOP_DEC;
+ }
+ return BRW_AOP_ADD;
+ }
+
+ AOP_CASE(imin): return BRW_AOP_IMIN;
+ AOP_CASE(umin): return BRW_AOP_UMIN;
+ AOP_CASE(imax): return BRW_AOP_IMAX;
+ AOP_CASE(umax): return BRW_AOP_UMAX;
+ AOP_CASE(and): return BRW_AOP_AND;
+ AOP_CASE(or): return BRW_AOP_OR;
+ AOP_CASE(xor): return BRW_AOP_XOR;
+ AOP_CASE(exchange): return BRW_AOP_MOV;
+ AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
+
+#undef AOP_CASE
+#define AOP_CASE(atom) \
+ case nir_intrinsic_ssbo_atomic_##atom: \
+ case nir_intrinsic_shared_atomic_##atom: \
+ case nir_intrinsic_global_atomic_##atom
+
+ AOP_CASE(fmin): return BRW_AOP_FMIN;
+ AOP_CASE(fmax): return BRW_AOP_FMAX;
+ AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
+
+#undef AOP_CASE
+
+ default:
+ unreachable("Unsupported NIR atomic intrinsic");
}
}