From: Jason Ekstrand
Date: Thu, 11 Jul 2019 20:20:39 +0000 (-0500)
Subject: nir/alu_to_scalar: Use the new NIR lowering framework
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=c1cffa4249ca62622ae22dab3097addac86dcb62;p=mesa.git

nir/alu_to_scalar: Use the new NIR lowering framework

Reviewed-by: Eric Anholt
---

diff --git a/src/compiler/nir/nir_lower_alu_to_scalar.c b/src/compiler/nir/nir_lower_alu_to_scalar.c
index 4814c2d09ec..d346ee7a94a 100644
--- a/src/compiler/nir/nir_lower_alu_to_scalar.c
+++ b/src/compiler/nir/nir_lower_alu_to_scalar.c
@@ -30,6 +30,23 @@
  * arguments with individual per-channel operations.
  */

+static bool
+inst_is_vector_alu(const nir_instr *instr, const void *_state)
+{
+   if (instr->type != nir_instr_type_alu)
+      return false;
+
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
+
+   /* There is no ALU instruction which has a scalar destination, scalar
+    * src[0], and some other vector source.
+    */
+   assert(alu->dest.dest.is_ssa);
+   assert(alu->src[0].src.is_ssa);
+   return alu->dest.dest.ssa.num_components > 1 ||
+          alu->src[0].src.ssa->num_components > 1;
+}
+
 static void
 nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                       unsigned bit_size)
@@ -39,7 +56,7 @@ nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
    alu->dest.write_mask = (1 << num_components) - 1;
 }

-static void
+static nir_ssa_def *
 lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                 nir_builder *builder)
 {
@@ -68,14 +85,14 @@ lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
       }
    }

-   assert(alu->dest.write_mask == 1);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(last));
-   nir_instr_remove(&alu->instr);
+   return last;
 }

-static bool
-lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_set)
+static nir_ssa_def *
+lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_state)
 {
+   BITSET_WORD *lower_set = _state;
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
    unsigned num_src = nir_op_infos[alu->op].num_inputs;
    unsigned i, chan;

@@ -86,14 +103,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
    b->exact = alu->exact;

    if (lower_set && !BITSET_TEST(lower_set, alu->op))
-      return false;
+      return NULL;

 #define LOWER_REDUCTION(name, chan, merge) \
    case name##2: \
    case name##3: \
    case name##4: \
-      lower_reduction(alu, chan, merge, b); \
-      return true;
+      return lower_reduction(alu, chan, merge, b); \

    switch (alu->op) {
    case nir_op_vec4:
@@ -104,21 +120,16 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       /* We don't need to scalarize these ops, they're the ones generated to
        * group up outputs into a value that can be SSAed.
        */
-      return false;
+      return NULL;

-   case nir_op_pack_half_2x16:
+   case nir_op_pack_half_2x16: {
       if (!b->shader->options->lower_pack_half_2x16)
-         return false;
+         return NULL;

       nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
-
-      nir_ssa_def *val =
-         nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
-                                  nir_channel(b, src_vec2, 1));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
+                                         nir_channel(b, src_vec2, 1));
+   }

    case nir_op_unpack_unorm_4x8:
    case nir_op_unpack_snorm_4x8:
@@ -127,22 +138,15 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       /* There is no scalar version of these ops, unless we were to break it
        * down to bitshifts and math (which is definitely not intended).
        */
-      return false;
+      return NULL;

    case nir_op_unpack_half_2x16: {
       if (!b->shader->options->lower_unpack_half_2x16)
-         return false;
+         return NULL;

       nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
-
-      nir_ssa_def *comps[2];
-      comps[0] = nir_unpack_half_2x16_split_x(b, packed);
-      comps[1] = nir_unpack_half_2x16_split_y(b, packed);
-      nir_ssa_def *vec = nir_vec(b, comps, 2);
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_vec2(b, nir_unpack_half_2x16_split_x(b, packed),
+                         nir_unpack_half_2x16_split_y(b, packed));
    }

    case nir_op_pack_uvec2_to_uint: {
@@ -151,13 +155,9 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se

       nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                              nir_imm_int(b, 0));
-      nir_ssa_def *val =
-         nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
-                    nir_channel(b, word, 0));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      break;
+      return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
+                                    nir_imm_int(b, 16)),
+                        nir_channel(b, word, 0));
    }

    case nir_op_pack_uvec4_to_uint: {
@@ -166,15 +166,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se

       nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                             nir_imm_int(b, 0));
-      nir_ssa_def *val =
-         nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
-                               nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
-                    nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
-                               nir_channel(b, byte, 0)));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      break;
+      return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
+                                               nir_imm_int(b, 24)),
+                                   nir_ishl(b, nir_channel(b, byte, 2),
+                                               nir_imm_int(b, 16))),
+                        nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
+                                               nir_imm_int(b, 8)),
+                                   nir_channel(b, byte, 0)));
    }

    case nir_op_fdph: {
@@ -188,17 +186,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       }
       sum[3] = nir_channel(b, src1_vec, 3);

-      nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
-                                     nir_fadd(b, sum[2], sum[3]));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
+                         nir_fadd(b, sum[2], sum[3]));
    }

    case nir_op_unpack_64_2x32:
    case nir_op_unpack_32_2x16:
-      return false;
+      return NULL;

       LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
       LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
@@ -217,7 +211,7 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
    }

    if (alu->dest.dest.ssa.num_components == 1)
-      return false;
+      return NULL;

    unsigned num_components = alu->dest.dest.ssa.num_components;
    nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };
@@ -248,47 +242,14 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       nir_builder_instr_insert(b, &lower->instr);
    }

-   nir_ssa_def *vec = nir_vec(b, comps, num_components);
-
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
-
-   nir_instr_remove(&alu->instr);
-   return true;
-}
-
-static bool
-nir_lower_alu_to_scalar_impl(nir_function_impl *impl, BITSET_WORD *lower_set)
-{
-   nir_builder builder;
-   nir_builder_init(&builder, impl);
-   bool progress = false;
-
-   nir_foreach_block(block, impl) {
-      nir_foreach_instr_safe(instr, block) {
-         if (instr->type == nir_instr_type_alu) {
-            progress = lower_alu_instr_scalar(nir_instr_as_alu(instr),
-                                              &builder,
-                                              lower_set) || progress;
-         }
-      }
-   }
-
-   nir_metadata_preserve(impl, nir_metadata_block_index |
-                               nir_metadata_dominance);
-
-   return progress;
+   return nir_vec(b, comps, num_components);
 }

 bool
 nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set)
 {
-   bool progress = false;
-
-   nir_foreach_function(function, shader) {
-      if (function->impl)
-         progress = nir_lower_alu_to_scalar_impl(function->impl,
-                                                 lower_set) || progress;
-   }
-
-   return progress;
+   return nir_shader_lower_instructions(shader,
+                                        inst_is_vector_alu,
+                                        lower_alu_instr_scalar,
+                                        lower_set);
 }
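
The framework this commit moves to, nir_shader_lower_instructions, reduces a
lowering pass to two callbacks: a filter (inst_is_vector_alu above) that picks
which instructions to touch, and a lowering callback (lower_alu_instr_scalar)
that is handed a nir_builder positioned at the instruction and returns either
a replacement nir_ssa_def or NULL for "leave it alone". The helper rewrites
all uses of the old destination, removes the original instruction, and takes
over the per-function iteration, metadata preservation, and progress
reporting, which is why the hand-rolled nir_lower_alu_to_scalar_impl loop
could be deleted. As a minimal sketch of the same pattern, here is a
hypothetical stand-alone pass (lower_fsub_example and its callbacks are
illustrative only, not part of Mesa; the callback signatures follow nir.h as
of this commit):

#include "nir.h"
#include "nir_builder.h"

static bool
is_fsub(const nir_instr *instr, const void *_data)
{
   /* Filter callback: only fsub ALU instructions reach the lowering
    * callback below.
    */
   return instr->type == nir_instr_type_alu &&
          nir_instr_as_alu(instr)->op == nir_op_fsub;
}

static nir_ssa_def *
lower_fsub(nir_builder *b, nir_instr *instr, void *_data)
{
   /* The builder comes in pointing at the instruction being lowered.
    * Whatever we return replaces every use of the old destination; the
    * framework then removes the original instruction for us.
    */
   nir_alu_instr *alu = nir_instr_as_alu(instr);
   return nir_fadd(b, nir_ssa_for_alu_src(b, alu, 0),
                      nir_fneg(b, nir_ssa_for_alu_src(b, alu, 1)));
}

bool
lower_fsub_example(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader, is_fsub, lower_fsub, NULL);
}

Returning NULL from the lowering callback, as lower_alu_instr_scalar now does
for the ops it skips, counts as "no progress" on that instruction; this is
what replaces the old manual progress bookkeeping.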