/* Masks in a saneish format. One bit per channel, not packed fancy.
* Use this instead of the op specific ones, and switch over at emit
* time */
+
uint16_t mask;
+ /* For ALU ops only: set to true to invert (bitwise NOT) the
+ * destination of an integer-out op. Not implemented in hardware but
+ * allows more optimizations */
+
+ bool invert;
+
union {
midgard_load_store_word load_store;
midgard_vector_alu alu;
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
+void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
+
#endif
}
}
-static unsigned
-make_compiler_temp(compiler_context *ctx)
-{
- return ctx->func->impl->ssa_alloc + ctx->func->impl->reg_alloc + ctx->temp_alloc++;
-}
-
static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
unsigned *dest)
{
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
- /* Second op implicit #0 */
- ALU_CASE(inot, inor);
+ /* We'll set invert */
+ ALU_CASE(inot, imov);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
} else if (instr->op == nir_op_inot) {
- /* ~b = ~(b & b), so duplicate the source */
- ins.ssa_args.src1 = ins.ssa_args.src0;
- ins.alu.src2 = ins.alu.src1;
+ ins.invert = true;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
}
} while (progress);
+ mir_foreach_block(ctx, block) {
+ midgard_lower_invert(ctx, block);
+ }
+
/* Nested control-flow can result in dead branches at the end of the
* block. This messes with our analysis and is just dead code, so cull
* them */
midgard_opt_cull_dead_branch(ctx, block);
}
+ /* Ensure all invert flags were lowered away */
+ mir_foreach_instr_global(ctx, ins) {
+ assert(!ins->invert);
+ }
+
/* Schedule! */
schedule_program(ctx);
--- /dev/null
+/*
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "compiler.h"
+#include "midgard_ops.h"
+
+/* Lowers the invert field on instructions to a dedicated inot (inor)
+ * instruction instead, as invert is not always supported natively by the
+ * hardware */
+
+void
+midgard_lower_invert(compiler_context *ctx, midgard_block *block)
+{
+        /* Safe iteration: we insert a new instruction after each one we
+         * rewrite */
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                /* The invert flag is only meaningful on ALU ops */
+                if (ins->type != TAG_ALU_4) continue;
+                if (!ins->invert) continue;
+
+                /* NOTE(review): this same patch deletes make_compiler_temp
+                 * from midgard_compile.c -- confirm it was moved to a shared
+                 * header visible from this new file */
+
+                unsigned temp = make_compiler_temp(ctx);
+
+                /* Materialize the NOT as dest = inor(temp, #0): since inor
+                 * is integer NOR, ~(temp | 0) == ~temp. The #0 rides as an
+                 * inline constant in src1 */
+
+                midgard_instruction not = {
+                        .type = TAG_ALU_4,
+                        .mask = ins->mask,
+                        .ssa_args = {
+                                .src0 = temp,
+                                .src1 = 0,
+                                .dest = ins->ssa_args.dest,
+                                .inline_constant = true
+                        },
+                        .alu = {
+                                .op = midgard_alu_op_inor,
+                                /* TODO: i16 */
+                                .reg_mode = midgard_reg_mode_32,
+                                .dest_override = midgard_dest_override_none,
+                                .outmod = midgard_outmod_int_wrap,
+                                .src1 = vector_alu_srco_unsigned(blank_alu_src),
+                                .src2 = vector_alu_srco_unsigned(zero_alu_src)
+                        },
+                };
+
+                /* Redirect the inverted op into the temp, clear the flag so
+                 * the final assert passes, and chain the NOT right after it
+                 * writing the original destination */
+
+                ins->ssa_args.dest = temp;
+                ins->invert = false;
+                mir_insert_instruction_before(mir_next_op(ins), not);
+        }
+}