#include "freedreno_util.h"
#include "ir3.h"
+#include "ir3_shader.h"
/*
* Copy Propagate:
*/
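+/* Pass-wide state threaded through the walk below: 'shader' is the ir
+ * being rewritten, 'so' is the shader variant whose immediates table
+ * lower_immed() grows, and 'immediate_idx' counts the scalar slots
+ * already used in so->immediates (four per vec4 const).
+ */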
+struct ir3_cp_ctx {
+ struct ir3 *shader;
+ struct ir3_shader_variant *so;
+ unsigned immediate_idx;
+};
+
/* is it a type preserving mov, with ok flags? */
static bool is_eligible_mov(struct ir3_instruction *instr, bool allow_flags)
{
if (!src_instr)
return false;
/* TODO: remove this hack: */
- if (is_meta(src_instr) && (src_instr->opc == OPC_META_FO))
+ if (src_instr->opc == OPC_META_FO)
return false;
/* TODO: we currently don't handle left/right neighbors
* very well when inserting parallel-copies into phi..
* to avoid problems don't eliminate a mov coming out
* of phi..
*/
- if (is_meta(src_instr) && (src_instr->opc == OPC_META_PHI))
+ if (src_instr->opc == OPC_META_PHI)
return false;
return true;
}
(flags & IR3_REG_RELATIV))
return false;
- /* clear flags that are 'ok' */
- switch (instr->category) {
+ /* TODO it seems to *mostly* work to cp RELATIV, except we get some
+ * intermittent piglit variable-indexing fails. The newer blob driver
+ * doesn't seem to cp these. Possibly this is a hw workaround? Not
+ * sure, but until that is understood better, let's just switch off
+ * cp for indirect src's:
+ */
+ if (flags & IR3_REG_RELATIV)
+ return false;
+
+ switch (opc_cat(instr->opc)) {
case 1:
valid_flags = IR3_REG_IMMED | IR3_REG_CONST | IR3_REG_RELATIV;
if (flags & ~valid_flags)
return false;
break;
- case 5:
- /* no flags allowed */
- if (flags)
- return false;
- break;
- case 6:
- valid_flags = IR3_REG_IMMED;
- if (flags & ~valid_flags)
- return false;
- break;
case 2:
valid_flags = ir3_cat2_absneg(instr->opc) |
IR3_REG_CONST | IR3_REG_RELATIV;
case 4:
/* seems like blob compiler avoids const as src.. */
/* TODO double check if this is still the case on a4xx */
- if (flags & IR3_REG_CONST)
+ if (flags & (IR3_REG_CONST | IR3_REG_IMMED))
return false;
if (flags & (IR3_REG_SABS | IR3_REG_SNEG))
return false;
break;
+ case 5:
+ /* no flags allowed */
+ if (flags)
+ return false;
+ break;
+ case 6:
+ valid_flags = IR3_REG_IMMED;
+ if (flags & ~valid_flags)
+ return false;
+
+ if (flags & IR3_REG_IMMED) {
+ /* doesn't seem like we can have an immediate src for store
+ * instructions:
+ *
+ * TODO this restriction could also apply to load instructions,
+ * but for load instructions this arg is the address (and not
+ * really sure of a good way to test a hard-coded immed addr src)
+ */
+ if (is_store(instr) && (n == 1))
+ return false;
+
+ /* disallow CP into anything but the SSBO slot argument for
+ * atomics:
+ */
+ if (is_atomic(instr->opc) && (n != 0))
+ return false;
+ }
+
+ break;
}
return true;
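+/* e.g. a cat5 (texture) sample instruction accepts no src flags at all,
+ * so a mov applying (abs), (neg), immed, or const can never be folded
+ * into it, while a cat1 mov accepts immed/const/relative srcs directly.
+ */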
/* propagate register flags from src to dst.. negates need special
* handling to cancel each other out.
*/
-static void combine_flags(unsigned *dstflags, unsigned srcflags)
+static void combine_flags(unsigned *dstflags, struct ir3_instruction *src)
{
+ unsigned srcflags = src->regs[1]->flags;
+
/* if what we are combining into already has (abs) flags,
* we can drop (neg) from src:
*/
*dstflags |= srcflags & IR3_REG_IMMED;
*dstflags |= srcflags & IR3_REG_RELATIV;
*dstflags |= srcflags & IR3_REG_ARRAY;
+
+ /* if src of the src is boolean, we can drop the (abs) since we know
+ * the source value is already a positive integer. This cleans
+ * up the absnegs that get inserted when converting between nir and
+ * native boolean (see ir3_b2n/n2b)
+ */
+ struct ir3_instruction *srcsrc = ssa(src->regs[1]);
+ if (srcsrc && is_bool(srcsrc))
+ *dstflags &= ~IR3_REG_SABS;
}
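+/* e.g. folding a (neg) mov into a src that itself has (neg):
+ *
+ *   mov dst, (neg)a
+ *   add out, (neg)dst, b
+ *
+ * propagates to 'add out, a, b', the two (neg)'s cancelling, while a
+ * (neg) combined under an existing (abs) is simply dropped.
+ */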
-/* the "plain" MAD's (ie. the ones that don't shift first src prior to
- * multiply) can swap their first two srcs if src[0] is !CONST and
- * src[1] is CONST:
- */
-static bool is_valid_mad(struct ir3_instruction *instr)
+static struct ir3_register *
+lower_immed(struct ir3_cp_ctx *ctx, struct ir3_register *reg, unsigned new_flags)
{
- return (instr->category == 3) && is_mad(instr->opc);
+ unsigned swiz, idx, i;
+
+ reg = ir3_reg_clone(ctx->shader, reg);
+
+ /* in some cases, there are restrictions on (abs)/(neg) plus const..
+ * so just evaluate those and clear the flags:
+ */
+ if (new_flags & IR3_REG_SABS) {
+ reg->iim_val = abs(reg->iim_val);
+ new_flags &= ~IR3_REG_SABS;
+ }
+
+ if (new_flags & IR3_REG_FABS) {
+ reg->fim_val = fabs(reg->fim_val);
+ new_flags &= ~IR3_REG_FABS;
+ }
+
+ if (new_flags & IR3_REG_SNEG) {
+ reg->iim_val = -reg->iim_val;
+ new_flags &= ~IR3_REG_SNEG;
+ }
+
+ if (new_flags & IR3_REG_FNEG) {
+ reg->fim_val = -reg->fim_val;
+ new_flags &= ~IR3_REG_FNEG;
+ }
+
+ for (i = 0; i < ctx->immediate_idx; i++) {
+ swiz = i % 4;
+ idx = i / 4;
+
+ if (ctx->so->immediates[idx].val[swiz] == reg->uim_val) {
+ break;
+ }
+ }
+
+ if (i == ctx->immediate_idx) {
+ /* need to generate a new immediate: */
+ swiz = i % 4;
+ idx = i / 4;
+ ctx->so->immediates[idx].val[swiz] = reg->uim_val;
+ ctx->so->immediates_count = idx + 1;
+ ctx->immediate_idx++;
+ }
+
+ new_flags &= ~IR3_REG_IMMED;
+ new_flags |= IR3_REG_CONST;
+ reg->flags = new_flags;
+ reg->num = i + (4 * ctx->so->constbase.immediate);
+
+ return reg;
}
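+/* e.g. (slot numbers hypothetical): rewriting a src holding immediate
+ * 0x3f800000 (1.0f) when immediate_idx == 5 either reuses a matching
+ * value already in so->immediates, or appends it at vec4 1 component
+ * 'y', and the src becomes a const read at 4 * constbase.immediate + 5.
+ */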
/**
* instruction).
*/
static void
-reg_cp(struct ir3_instruction *instr, struct ir3_register *reg, unsigned n)
+reg_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr,
+ struct ir3_register *reg, unsigned n)
{
struct ir3_instruction *src = ssa(reg);
+ /* don't propagate copies into a PHI, since we don't know if the
+ * src block executed:
+ */
+ if (instr->opc == OPC_META_PHI)
+ return;
+
if (is_eligible_mov(src, true)) {
/* simple case, no immed/const/relativ, only mov's w/ ssa src: */
struct ir3_register *src_reg = src->regs[1];
unsigned new_flags = reg->flags;
- combine_flags(&new_flags, src_reg->flags);
+ combine_flags(&new_flags, src);
if (valid_flags(instr, n, new_flags)) {
if (new_flags & IR3_REG_ARRAY) {
struct ir3_register *src_reg = src->regs[1];
unsigned new_flags = reg->flags;
- combine_flags(&new_flags, src_reg->flags);
+ combine_flags(&new_flags, src);
if (!valid_flags(instr, n, new_flags)) {
+ /* See if lowering an immediate to const would help. */
+ if (valid_flags(instr, n, (new_flags & ~IR3_REG_IMMED) | IR3_REG_CONST)) {
+ debug_assert(new_flags & IR3_REG_IMMED);
+ instr->regs[n + 1] = lower_immed(ctx, src_reg, new_flags);
+ return;
+ }
+
/* special case for "normal" mad instructions: we can
* try swapping the first two args if that fits better.
+ *
+ * the "plain" MAD's (ie. the ones that don't shift first
+ * src prior to multiply) can swap their first two srcs if
+ * src[0] is !CONST and src[1] is CONST:
*/
- if ((n == 1) && is_valid_mad(instr) &&
+ if ((n == 1) && is_mad(instr->opc) &&
!(instr->regs[0 + 1]->flags & (IR3_REG_CONST | IR3_REG_RELATIV)) &&
valid_flags(instr, 0, new_flags)) {
/* swap src[0] and src[1]: */
* just somehow don't work out. This restriction may only
* apply if the first src is also CONST.
*/
- if ((instr->category == 3) && (n == 2) &&
+ if ((opc_cat(instr->opc) == 3) && (n == 2) &&
(src_reg->flags & IR3_REG_RELATIV) &&
(src_reg->array.offset == 0))
return;
if (src_reg->flags & IR3_REG_IMMED) {
int32_t iim_val = src_reg->iim_val;
- debug_assert((instr->category == 1) ||
- (instr->category == 6) ||
- ((instr->category == 2) &&
- ir3_cat2_int(instr->opc)));
+ debug_assert((opc_cat(instr->opc) == 1) ||
+ (opc_cat(instr->opc) == 6) ||
+ ir3_cat2_int(instr->opc));
if (new_flags & IR3_REG_SABS)
iim_val = abs(iim_val);
if (new_flags & IR3_REG_SNEG)
iim_val = -iim_val;
if (new_flags & IR3_REG_BNOT)
iim_val = ~iim_val;
/* other than category 1 (mov) we can only encode up to 10 bits: */
- if ((instr->category == 1) || !(iim_val & ~0x3ff)) {
+ if ((instr->opc == OPC_MOV) ||
+ !((iim_val & ~0x3ff) && (-iim_val & ~0x3ff))) {
new_flags &= ~(IR3_REG_SABS | IR3_REG_SNEG | IR3_REG_BNOT);
src_reg = ir3_reg_clone(instr->block->shader, src_reg);
src_reg->flags = new_flags;
src_reg->iim_val = iim_val;
instr->regs[n+1] = src_reg;
+ } else if (valid_flags(instr, n, (new_flags & ~IR3_REG_IMMED) | IR3_REG_CONST)) {
+ /* See if lowering an immediate to const would help. */
+ instr->regs[n+1] = lower_immed(ctx, src_reg, new_flags);
}
return;
* the mov dst with the mov src
*/
static void
-instr_cp(struct ir3_instruction *instr)
+instr_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
{
struct ir3_register *reg;
if (!src)
continue;
- instr_cp(src);
+ instr_cp(ctx, src);
/* TODO non-indirect access we could figure out which register
* we actually want and allow cp..
if (reg->flags & IR3_REG_ARRAY)
continue;
- reg_cp(instr, reg, n);
+ reg_cp(ctx, instr, reg, n);
}
if (instr->regs[0]->flags & IR3_REG_ARRAY) {
struct ir3_instruction *src = ssa(instr->regs[0]);
if (src)
- instr_cp(src);
+ instr_cp(ctx, src);
}
if (instr->address) {
- instr_cp(instr->address);
+ instr_cp(ctx, instr->address);
ir3_instr_set_address(instr, eliminate_output_mov(instr->address));
}
+
+ /* we can end up with extra cmps.s from the frontend, which uses
+ *
+ * cmps.s p0.x, cond, 0
+ *
+ * as a way to mov into the predicate register. But frequently 'cond'
+ * is itself a cmps.s/cmps.f/cmps.u. So detect this special case and
+ * just re-write the instruction writing the predicate register to get
+ * rid of the double cmps.
+ */
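+ /* e.g. (register numbers hypothetical):
+ *
+ * cmps.f.lt r0.x, r1.x, r2.x
+ * cmps.s.ne p0.x, r0.x, 0
+ *
+ * is re-written to a single:
+ *
+ * cmps.f.lt p0.x, r1.x, r2.x
+ */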
+ if ((instr->opc == OPC_CMPS_S) &&
+ (instr->regs[0]->num == regid(REG_P0, 0)) &&
+ ssa(instr->regs[1]) &&
+ (instr->regs[2]->flags & IR3_REG_IMMED) &&
+ (instr->regs[2]->iim_val == 0)) {
+ struct ir3_instruction *cond = ssa(instr->regs[1]);
+ switch (cond->opc) {
+ case OPC_CMPS_S:
+ case OPC_CMPS_F:
+ case OPC_CMPS_U:
+ instr->opc = cond->opc;
+ instr->flags = cond->flags;
+ instr->cat2 = cond->cat2;
+ instr->address = cond->address;
+ instr->regs[1] = cond->regs[1];
+ instr->regs[2] = cond->regs[2];
+ break;
+ default:
+ break;
+ }
+ }
}
void
-ir3_cp(struct ir3 *ir)
+ir3_cp(struct ir3 *ir, struct ir3_shader_variant *so)
{
+ struct ir3_cp_ctx ctx = {
+ .shader = ir,
+ .so = so,
+ };
+
ir3_clear_mark(ir);
for (unsigned i = 0; i < ir->noutputs; i++) {
if (ir->outputs[i]) {
- instr_cp(ir->outputs[i]);
+ instr_cp(&ctx, ir->outputs[i]);
ir->outputs[i] = eliminate_output_mov(ir->outputs[i]);
}
}
- for (unsigned i = 0; i < ir->keeps_count; i++) {
- instr_cp(ir->keeps[i]);
- ir->keeps[i] = eliminate_output_mov(ir->keeps[i]);
- }
-
list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
if (block->condition) {
- instr_cp(block->condition);
+ instr_cp(&ctx, block->condition);
block->condition = eliminate_output_mov(block->condition);
}
+
+ for (unsigned i = 0; i < block->keeps_count; i++) {
+ instr_cp(&ctx, block->keeps[i]);
+ block->keeps[i] = eliminate_output_mov(block->keeps[i]);
+ }
}
}
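+
+/* End-to-end sketch (names hypothetical): a chain like
+ *
+ *   mov.f32f32 r0.x, c0.x
+ *   add.f r1.x, r0.x, r0.y
+ *
+ * loses the mov once the const src is propagated, leaving
+ *
+ *   add.f r1.x, c0.x, r0.y
+ *
+ * valid because valid_flags() permits IR3_REG_CONST on a cat2 src.
+ */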