#include "nir.h"
#include "nir_builder.h"
+struct alu_to_scalar_data {
+ nir_instr_filter_cb cb;
+ const void *data;
+};
+
/** @file nir_lower_alu_to_scalar.c
*
 * Replaces nir_alu_instr operations that use more than one channel in
 * their arguments with individual per-channel operations.
*/
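+/* Matches the ALU instructions this pass needs to touch: anything with a
+ * vector destination or a vector first source. Serves as the instruction
+ * filter for nir_shader_lower_instructions.
+ */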
+static bool
+inst_is_vector_alu(const nir_instr *instr, const void *_state)
+{
+ if (instr->type != nir_instr_type_alu)
+ return false;
+
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+
+ /* There is no ALU instruction which has a scalar destination, scalar
+ * src[0], and some other vector source.
+ */
+ assert(alu->dest.dest.is_ssa);
+ assert(alu->src[0].src.is_ssa);
+ return alu->dest.dest.ssa.num_components > 1 ||
+ nir_op_infos[alu->op].input_sizes[0] > 1;
+}
+
static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
unsigned bit_size)
alu->dest.write_mask = (1 << num_components) - 1;
}
-static void
+static nir_ssa_def *
lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
nir_builder *builder)
{
}
}
- assert(alu->dest.write_mask == 1);
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(last));
- nir_instr_remove(&alu->instr);
+ return last;
}
-static bool
-lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_set)
+static nir_ssa_def *
+lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data)
{
+ struct alu_to_scalar_data *data = _data;
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
unsigned num_src = nir_op_infos[alu->op].num_inputs;
unsigned i, chan;
b->cursor = nir_before_instr(&alu->instr);
b->exact = alu->exact;
- if (lower_set && !BITSET_TEST(lower_set, alu->op))
- return false;
+ if (data->cb && !data->cb(instr, data->data))
+ return NULL;
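+
+ /* From here on, returning a nir_ssa_def asks nir_shader_lower_instructions
+ * to rewrite all uses of the original ALU result with it and remove the
+ * instruction; returning NULL leaves the instruction untouched.
+ */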
#define LOWER_REDUCTION(name, chan, merge) \
case name##2: \
case name##3: \
case name##4: \
- lower_reduction(alu, chan, merge, b); \
- return true;
+ case name##8: \
+ case name##16: \
+ return lower_reduction(alu, chan, merge, b);
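+/* For example, LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd)
+ * expands to case labels for the 2-, 3-, 4-, 8- and 16-component fdot
+ * variants, each returning lower_reduction(alu, nir_op_fmul, nir_op_fadd, b):
+ * multiply the source channels pairwise, then fold the products together
+ * with fadd.
+ */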
switch (alu->op) {
+ case nir_op_vec16:
+ case nir_op_vec8:
case nir_op_vec4:
case nir_op_vec3:
case nir_op_vec2:
/* We don't need to scalarize these ops; they're the ones generated to
 * group up outputs into a value that can be SSAed.
 */
- return false;
+ return NULL;
- case nir_op_pack_half_2x16:
+ case nir_op_pack_half_2x16: {
if (!b->shader->options->lower_pack_half_2x16)
- return false;
+ return NULL;
nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
-
- nir_ssa_def *val =
- nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
- nir_channel(b, src_vec2, 1));
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&alu->instr);
- return true;
+ return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
+ nir_channel(b, src_vec2, 1));
+ }
case nir_op_unpack_unorm_4x8:
case nir_op_unpack_snorm_4x8:
/* There is no scalar version of these ops, unless we were to break them
 * down to bitshifts and math (which is definitely not intended).
 */
- return false;
+ return NULL;
+ case nir_op_unpack_half_2x16_flush_to_zero:
case nir_op_unpack_half_2x16: {
if (!b->shader->options->lower_unpack_half_2x16)
- return false;
+ return NULL;
nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
-
- nir_ssa_def *comps[2];
- comps[0] = nir_unpack_half_2x16_split_x(b, packed);
- comps[1] = nir_unpack_half_2x16_split_y(b, packed);
- nir_ssa_def *vec = nir_vec(b, comps, 2);
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
- nir_instr_remove(&alu->instr);
- return true;
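+ /* The flush-to-zero variant must flush subnormal fp16 values to zero as
+ * part of the conversion, so it lowers to the matching *_flush_to_zero
+ * split opcodes.
+ */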
+ if (alu->op == nir_op_unpack_half_2x16_flush_to_zero) {
+ return nir_vec2(b,
+ nir_unpack_half_2x16_split_x_flush_to_zero(b,
+ packed),
+ nir_unpack_half_2x16_split_y_flush_to_zero(b,
+ packed));
+ } else {
+ return nir_vec2(b,
+ nir_unpack_half_2x16_split_x(b, packed),
+ nir_unpack_half_2x16_split_y(b, packed));
+ }
}
case nir_op_pack_uvec2_to_uint: {
nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
nir_imm_int(b, 0));
- nir_ssa_def *val =
- nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
- nir_channel(b, word, 0));
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&alu->instr);
- break;
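+ /* Recombine the two low 16-bit words: (word.y << 16) | word.x. */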
+ return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
+ nir_imm_int(b, 16)),
+ nir_channel(b, word, 0));
}
case nir_op_pack_uvec4_to_uint: {
nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
nir_imm_int(b, 0));
- nir_ssa_def *val =
- nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
- nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
- nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
- nir_channel(b, byte, 0)));
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&alu->instr);
- break;
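+ /* Recombine the four low bytes:
+ * (byte.w << 24) | (byte.z << 16) | (byte.y << 8) | byte.x.
+ */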
+ return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
+ nir_imm_int(b, 24)),
+ nir_ishl(b, nir_channel(b, byte, 2),
+ nir_imm_int(b, 16))),
+ nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
+ nir_imm_int(b, 8)),
+ nir_channel(b, byte, 0)));
}
case nir_op_fdph: {
}
sum[3] = nir_channel(b, src1_vec, 3);
- nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
- nir_fadd(b, sum[2], sum[3]));
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
- nir_instr_remove(&alu->instr);
- return true;
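+ /* fdph(a, b) == a.x*b.x + a.y*b.y + a.z*b.z + b.w, so the last term is
+ * just src1's fourth channel, unmultiplied.
+ */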
+ return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
+ nir_fadd(b, sum[2], sum[3]));
}
case nir_op_unpack_64_2x32:
+ case nir_op_unpack_64_4x16:
case nir_op_unpack_32_2x16:
- return false;
+ return NULL;
LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
+ LOWER_REDUCTION(nir_op_b8all_fequal, nir_op_feq8, nir_op_iand);
+ LOWER_REDUCTION(nir_op_b8all_iequal, nir_op_ieq8, nir_op_iand);
+ LOWER_REDUCTION(nir_op_b8any_fnequal, nir_op_fne8, nir_op_ior);
+ LOWER_REDUCTION(nir_op_b8any_inequal, nir_op_ine8, nir_op_ior);
+ LOWER_REDUCTION(nir_op_b16all_fequal, nir_op_feq16, nir_op_iand);
+ LOWER_REDUCTION(nir_op_b16all_iequal, nir_op_ieq16, nir_op_iand);
+ LOWER_REDUCTION(nir_op_b16any_fnequal, nir_op_fne16, nir_op_ior);
+ LOWER_REDUCTION(nir_op_b16any_inequal, nir_op_ine16, nir_op_ior);
LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand);
LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand);
LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fne32, nir_op_ior);
}
if (alu->dest.dest.ssa.num_components == 1)
- return false;
+ return NULL;
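+ /* Fully scalarize: emit one copy of the operation per destination channel
+ * and regroup the results with a vec. E.g. "vec4 32 ssa_2 = fadd ssa_0,
+ * ssa_1" becomes four scalar fadds feeding a vec4 (SSA names invented).
+ */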
unsigned num_components = alu->dest.dest.ssa.num_components;
nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };
nir_builder_instr_insert(b, &lower->instr);
}
- nir_ssa_def *vec = nir_vec(b, comps, num_components);
-
- nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
-
- nir_instr_remove(&alu->instr);
- return true;
-}
-
-static bool
-nir_lower_alu_to_scalar_impl(nir_function_impl *impl, BITSET_WORD *lower_set)
-{
- nir_builder builder;
- nir_builder_init(&builder, impl);
- bool progress = false;
-
- nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(instr, block) {
- if (instr->type == nir_instr_type_alu) {
- progress = lower_alu_instr_scalar(nir_instr_as_alu(instr),
- &builder,
- lower_set) || progress;
- }
- }
- }
-
- nir_metadata_preserve(impl, nir_metadata_block_index |
- nir_metadata_dominance);
-
- return progress;
+ return nir_vec(b, comps, num_components);
}
bool
-nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set)
+nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *_data)
{
- bool progress = false;
-
- nir_foreach_function(function, shader) {
- if (function->impl)
- progress = nir_lower_alu_to_scalar_impl(function->impl,
- lower_set) || progress;
- }
-
- return progress;
+ struct alu_to_scalar_data data = {
+ .cb = cb,
+ .data = _data,
+ };
+
+ return nir_shader_lower_instructions(shader,
+ inst_is_vector_alu,
+ lower_alu_instr_scalar,
+ &data);
}
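
/*
 * Usage sketch (illustrative only, not part of this patch): a backend that
 * handles vector dot products natively could keep fdot* as vector ops while
 * scalarizing every other vector ALU instruction. The callback name is
 * hypothetical; the filter is only invoked on instructions that already
 * passed inst_is_vector_alu, and returning true means "scalarize this".
 */
static bool
scalarize_all_but_fdot(const nir_instr *instr, const void *data)
{
   /* The cast is safe: this pass only calls the filter on ALU instructions. */
   const nir_alu_instr *alu = nir_instr_as_alu((nir_instr *)instr);

   switch (alu->op) {
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
      return false; /* keep as a vector reduction */
   default:
      return true; /* lower to per-channel operations */
   }
}

/* ...and at the call site, e.g. in the backend's lowering loop:
 *
 *    nir_lower_alu_to_scalar(shader, scalarize_all_but_fdot, NULL);
 */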