#define ACP_HASH_SIZE 16
-#include "main/bitset.h"
+#include "util/bitset.h"
#include "brw_fs.h"
#include "brw_cfg.h"
+#include "brw_eu.h"
namespace { /* avoid conflict with opt_copy_propagation_elements */
struct acp_entry : public exec_node {
fs_reg dst;
fs_reg src;
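+   /* Number of registers written by the generating instruction; used to
+    * check that a reader only covers the range this entry actually writes.
+    */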
+ uint8_t regs_written;
enum opcode opcode;
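+   /* Whether the generating MOV had saturate set; see the SEL handling in
+    * try_copy_propagate().
+    */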
+ bool saturate;
};
struct block_data {
void setup_initial_values();
void run();
- void dump_block_data() const;
+ void dump_block_data() const UNUSED;
void *mem_ctx;
cfg_t *cfg;
bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);
num_acp = 0;
- for (int b = 0; b < cfg->num_blocks; b++) {
+ foreach_block (block, cfg) {
for (int i = 0; i < ACP_HASH_SIZE; i++) {
- num_acp += out_acp[b][i].length();
+ num_acp += out_acp[block->num][i].length();
}
}
bitset_words = BITSET_WORDS(num_acp);
int next_acp = 0;
- for (int b = 0; b < cfg->num_blocks; b++) {
- bd[b].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
- bd[b].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
- bd[b].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
- bd[b].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
+ foreach_block (block, cfg) {
+ bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
+ bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
+ bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
+ bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
for (int i = 0; i < ACP_HASH_SIZE; i++) {
- foreach_in_list(acp_entry, entry, &out_acp[b][i]) {
+ foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
acp[next_acp] = entry;
/* opt_copy_propagate_local populates out_acp with copies created
* in a block which are still live at the end of the block. This
* is exactly what we want in the COPY set.
*/
- BITSET_SET(bd[b].copy, next_acp);
+ BITSET_SET(bd[block->num].copy, next_acp);
next_acp++;
}
fs_copy_prop_dataflow::setup_initial_values()
{
/* Initialize the COPY and KILL sets. */
- for (int b = 0; b < cfg->num_blocks; b++) {
- bblock_t *block = cfg->blocks[b];
-
+ foreach_block (block, cfg) {
foreach_inst_in_block(fs_inst, inst, block) {
- if (inst->dst.file != GRF)
+ if (inst->dst.file != VGRF)
continue;
/* Mark ACP entries which are killed by this instruction. */
for (int i = 0; i < num_acp; i++) {
if (inst->overwrites_reg(acp[i]->dst) ||
inst->overwrites_reg(acp[i]->src)) {
- BITSET_SET(bd[b].kill, i);
+ BITSET_SET(bd[block->num].kill, i);
}
}
}
* For the others, set liveout to 0 (the empty set) and livein to ~0
* (the universal set).
*/
- for (int b = 0; b < cfg->num_blocks; b++) {
- bblock_t *block = cfg->blocks[b];
+ foreach_block (block, cfg) {
if (block->parents.is_empty()) {
for (int i = 0; i < bitset_words; i++) {
- bd[b].livein[i] = 0u;
- bd[b].liveout[i] = bd[b].copy[i];
+ bd[block->num].livein[i] = 0u;
+ bd[block->num].liveout[i] = bd[block->num].copy[i];
}
} else {
for (int i = 0; i < bitset_words; i++) {
- bd[b].liveout[i] = 0u;
- bd[b].livein[i] = ~0u;
+ bd[block->num].liveout[i] = 0u;
+ bd[block->num].livein[i] = ~0u;
}
}
}
progress = false;
/* Update liveout for all blocks. */
- for (int b = 0; b < cfg->num_blocks; b++) {
- if (cfg->blocks[b]->parents.is_empty())
+ foreach_block (block, cfg) {
+ if (block->parents.is_empty())
continue;
for (int i = 0; i < bitset_words; i++) {
- const BITSET_WORD old_liveout = bd[b].liveout[i];
+ const BITSET_WORD old_liveout = bd[block->num].liveout[i];
- bd[b].liveout[i] =
- bd[b].copy[i] | (bd[b].livein[i] & ~bd[b].kill[i]);
+ bd[block->num].liveout[i] =
+ bd[block->num].copy[i] | (bd[block->num].livein[i] &
+ ~bd[block->num].kill[i]);
- if (old_liveout != bd[b].liveout[i])
+ if (old_liveout != bd[block->num].liveout[i])
progress = true;
}
}
/* Update livein for all blocks. If a copy is live out of all parent
* blocks, it's live coming in to this block.
*/
- for (int b = 0; b < cfg->num_blocks; b++) {
- if (cfg->blocks[b]->parents.is_empty())
+ foreach_block (block, cfg) {
+ if (block->parents.is_empty())
continue;
for (int i = 0; i < bitset_words; i++) {
- const BITSET_WORD old_livein = bd[b].livein[i];
+ const BITSET_WORD old_livein = bd[block->num].livein[i];
- bd[b].livein[i] = ~0u;
- foreach_list_typed(bblock_link, link, link, &cfg->blocks[b]->parents) {
- bblock_t *block = link->block;
- bd[b].livein[i] &= bd[block->block_num].liveout[i];
+ bd[block->num].livein[i] = ~0u;
+ foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
+ bblock_t *parent = parent_link->block;
+ bd[block->num].livein[i] &= bd[parent->num].liveout[i];
}
- if (old_livein != bd[b].livein[i])
+ if (old_livein != bd[block->num].livein[i])
progress = true;
}
}
void
fs_copy_prop_dataflow::dump_block_data() const
{
- for (int b = 0; b < cfg->num_blocks; b++) {
- bblock_t *block = cfg->blocks[b];
- fprintf(stderr, "Block %d [%d, %d] (parents ", block->block_num,
+ foreach_block (block, cfg) {
+ fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
block->start_ip, block->end_ip);
foreach_list_typed(bblock_link, link, link, &block->parents) {
bblock_t *parent = link->block;
- fprintf(stderr, "%d ", parent->block_num);
+ fprintf(stderr, "%d ", parent->num);
}
fprintf(stderr, "):\n");
fprintf(stderr, " livein = 0x");
for (int i = 0; i < bitset_words; i++)
- fprintf(stderr, "%08x", bd[b].livein[i]);
+ fprintf(stderr, "%08x", bd[block->num].livein[i]);
fprintf(stderr, ", liveout = 0x");
for (int i = 0; i < bitset_words; i++)
- fprintf(stderr, "%08x", bd[b].liveout[i]);
+ fprintf(stderr, "%08x", bd[block->num].liveout[i]);
fprintf(stderr, ",\n copy = 0x");
for (int i = 0; i < bitset_words; i++)
- fprintf(stderr, "%08x", bd[b].copy[i]);
+ fprintf(stderr, "%08x", bd[block->num].copy[i]);
fprintf(stderr, ", kill = 0x");
for (int i = 0; i < bitset_words; i++)
- fprintf(stderr, "%08x", bd[b].kill[i]);
+ fprintf(stderr, "%08x", bd[block->num].kill[i]);
fprintf(stderr, "\n");
}
}
opcode == BRW_OPCODE_NOT);
}
+static bool
+can_take_stride(fs_inst *inst, unsigned arg, unsigned stride,
+ const brw_device_info *devinfo)
+{
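+   /* Register regions can only encode horizontal strides of 0, 1, 2 and 4,
+    * so anything larger than 4 is rejected outright.
+    */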
+ if (stride > 4)
+ return false;
+
+ /* 3-source instructions can only be Align16, which restricts what strides
+ * they can take. They can only take a stride of 1 (the usual case), or 0
+ * with a special "repctrl" bit. But the repctrl bit doesn't work for
+ * 64-bit datatypes, so if the source type is 64-bit then only a stride of
+ * 1 is allowed. From the Broadwell PRM, Volume 7 "3D Media GPGPU", page
+ * 944:
+ *
+ * This is applicable to 32b datatypes and 16b datatype. 64b datatypes
+ * cannot use the replicate control.
+ */
+ if (inst->is_3src(devinfo)) {
+ if (type_sz(inst->src[arg].type) > 4)
+ return stride == 1;
+ else
+ return stride == 1 || stride == 0;
+ }
+
+ /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
+ * page 391 ("Extended Math Function"):
+ *
+ * The following restrictions apply for align1 mode: Scalar source is
+ * supported. Source and destination horizontal stride must be the
+ * same.
+ *
+ * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
+ * 134 ("Extended Math Function"):
+ *
+ * Scalar source is supported. Source and destination horizontal stride
+ * must be 1.
+ *
+ * and similar language exists for IVB and SNB. Pre-SNB, math instructions
+    * are sends, so the sources are moved to MRFs and there are no
+ * restrictions.
+ */
+ if (inst->is_math()) {
+ if (devinfo->gen == 6 || devinfo->gen == 7) {
+ assert(inst->dst.stride == 1);
+ return stride == 1 || stride == 0;
+ } else if (devinfo->gen >= 8) {
+ return stride == inst->dst.stride || stride == 0;
+ }
+ }
+
+ return true;
+}
+
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
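+   /* Only VGRF sources can be rewritten by this pass. */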
+ if (inst->src[arg].file != VGRF)
+ return false;
+
if (entry->src.file == IMM)
return false;
+ assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
+ entry->src.file == ATTR);
if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD)
return false;
- /* Bail if inst is reading more than entry is writing. */
- if ((inst->regs_read(this, arg) * inst->src[arg].stride *
- type_sz(inst->src[arg].type)) > type_sz(entry->dst.type))
+ assert(entry->dst.file == VGRF);
+ if (inst->src[arg].nr != entry->dst.nr)
return false;
- if (inst->src[arg].file != entry->dst.file ||
- inst->src[arg].reg != entry->dst.reg ||
- inst->src[arg].reg_offset != entry->dst.reg_offset ||
- inst->src[arg].subreg_offset != entry->dst.subreg_offset) {
+ /* Bail if inst is reading a range that isn't contained in the range
+ * that entry is writing.
+ */
+ if (inst->src[arg].reg_offset < entry->dst.reg_offset ||
+ (inst->src[arg].reg_offset * 32 + inst->src[arg].subreg_offset +
+ inst->regs_read(arg) * inst->src[arg].stride * 32) >
+ (entry->dst.reg_offset + entry->regs_written) * 32)
return false;
- }
- /* See resolve_ud_negate() and comment in brw_fs_emit.cpp. */
- if (inst->conditional_mod &&
- inst->src[arg].type == BRW_REGISTER_TYPE_UD &&
+   /* We can't generally copy-propagate UD negations because we
+ * can end up accessing the resulting values as signed integers
+ * instead. See also resolve_ud_negate() and comment in
+ * fs_generator::generate_code.
+ */
+ if (entry->src.type == BRW_REGISTER_TYPE_UD &&
entry->src.negate)
return false;
if ((has_source_modifiers || entry->src.file == UNIFORM ||
!entry->src.is_contiguous()) &&
- !inst->can_do_source_mods(brw))
+ !inst->can_do_source_mods(devinfo))
+ return false;
+
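+   /* Never introduce source modifiers on the payload of a Gen4 scratch
+    * write.
+    */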
+ if (has_source_modifiers &&
+ inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE)
return false;
/* Bail if the result of composing both strides would exceed the
* hardware limit.
*/
- if (entry->src.stride * inst->src[arg].stride > 4)
+ if (!can_take_stride(inst, arg, entry->src.stride * inst->src[arg].stride,
+ devinfo))
+ return false;
+
+   /* Bail if the instruction type is larger than the execution type of the
+    * copy, which implies that each channel is reading multiple channels of
+    * the destination of the copy; simply replacing the sources would give a
+    * program with different semantics.
+    */
+ if (type_sz(entry->dst.type) < type_sz(inst->src[arg].type))
return false;
/* Bail if the result of composing both strides cannot be expressed
type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
return false;
- if (has_source_modifiers && entry->dst.type != inst->src[arg].type)
+ if (has_source_modifiers &&
+ entry->dst.type != inst->src[arg].type &&
+ !inst->can_change_types())
return false;
- if (brw->gen >= 8 && (entry->src.negate || entry->src.abs) &&
+ if (devinfo->gen >= 8 && (entry->src.negate || entry->src.abs) &&
is_logic_op(inst->opcode)) {
return false;
}
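+   /* Propagating a saturating copy means saturating this instruction as
+    * well (see below).  For SEL that is only safe when the other operand
+    * is an immediate already within [0.0, 1.0], which saturation cannot
+    * change.
+    */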
+ if (entry->saturate) {
+      switch (inst->opcode) {
+ case BRW_OPCODE_SEL:
+ if (inst->src[1].file != IMM ||
+ inst->src[1].f < 0.0 ||
+ inst->src[1].f > 1.0) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+ }
+
inst->src[arg].file = entry->src.file;
- inst->src[arg].reg = entry->src.reg;
- inst->src[arg].reg_offset = entry->src.reg_offset;
- inst->src[arg].subreg_offset = entry->src.subreg_offset;
+ inst->src[arg].nr = entry->src.nr;
inst->src[arg].stride *= entry->src.stride;
+ inst->saturate = inst->saturate || entry->saturate;
+
+ switch (entry->src.file) {
+ case UNIFORM:
+ case BAD_FILE:
+ case ARF:
+ case FIXED_GRF:
+ inst->src[arg].reg_offset = entry->src.reg_offset;
+ inst->src[arg].subreg_offset = entry->src.subreg_offset;
+ break;
+ case ATTR:
+ case VGRF:
+ {
+ /* In this case, we'll just leave the width alone. The source
+ * register could have different widths depending on how it is
+ * being used. For instance, if only half of the register was
+ * used then we want to preserve that and continue to only use
+ * half.
+ *
+ * Also, we have to deal with mapping parts of vgrfs to other
+ * parts of vgrfs so we have to do some reg_offset magic.
+ */
- if (!inst->src[arg].abs) {
- inst->src[arg].abs = entry->src.abs;
- inst->src[arg].negate ^= entry->src.negate;
+ /* Compute the offset of inst->src[arg] relative to inst->dst */
+ assert(entry->dst.subreg_offset == 0);
+ int rel_offset = inst->src[arg].reg_offset - entry->dst.reg_offset;
+ int rel_suboffset = inst->src[arg].subreg_offset;
+
+ /* Compute the final register offset (in bytes) */
+ int offset = entry->src.reg_offset * 32 + entry->src.subreg_offset;
+ offset += rel_offset * 32 + rel_suboffset;
+ inst->src[arg].reg_offset = offset / 32;
+ inst->src[arg].subreg_offset = offset % 32;
+ }
+ break;
+
+ case MRF:
+ case IMM:
+ unreachable("not reached");
+ }
+
+ if (has_source_modifiers) {
+ if (entry->dst.type != inst->src[arg].type) {
+ /* We are propagating source modifiers from a MOV with a different
+ * type. If we got here, then we can just change the source and
+ * destination types of the instruction and keep going.
+ */
+ assert(inst->can_change_types());
+ for (int i = 0; i < inst->sources; i++) {
+ inst->src[i].type = entry->dst.type;
+ }
+ inst->dst.type = entry->dst.type;
+ }
+
+ if (!inst->src[arg].abs) {
+ inst->src[arg].abs = entry->src.abs;
+ inst->src[arg].negate ^= entry->src.negate;
+ }
}
return true;
}
-static bool
-try_constant_propagate(struct brw_context *brw, fs_inst *inst,
- acp_entry *entry)
+bool
+fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
bool progress = false;
if (entry->src.file != IMM)
return false;
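+   /* Only immediates of at most 32 bits are propagated here. */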
+ if (type_sz(entry->src.type) > 4)
+ return false;
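+   /* A saturating copy would require saturating the immediate first, so
+    * don't bother.
+    */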
+ if (entry->saturate)
+ return false;
for (int i = inst->sources - 1; i >= 0; i--) {
- if (inst->src[i].file != entry->dst.file ||
- inst->src[i].reg != entry->dst.reg ||
- inst->src[i].reg_offset != entry->dst.reg_offset ||
- inst->src[i].subreg_offset != entry->dst.subreg_offset ||
- inst->src[i].type != entry->dst.type ||
- inst->src[i].stride > 1)
+ if (inst->src[i].file != VGRF)
continue;
- /* Don't bother with cases that should have been taken care of by the
- * GLSL compiler's constant folding pass.
+ assert(entry->dst.file == VGRF);
+ if (inst->src[i].nr != entry->dst.nr)
+ continue;
+
+ /* Bail if inst is reading a range that isn't contained in the range
+ * that entry is writing.
*/
- if (inst->src[i].negate || inst->src[i].abs)
+ if (inst->src[i].reg_offset < entry->dst.reg_offset ||
+ (inst->src[i].reg_offset * 32 + inst->src[i].subreg_offset +
+ inst->regs_read(i) * inst->src[i].stride * 32) >
+ (entry->dst.reg_offset + entry->regs_written) * 32)
continue;
+ fs_reg val = entry->src;
+ val.type = inst->src[i].type;
+
+ if (inst->src[i].abs) {
+ if ((devinfo->gen >= 8 && is_logic_op(inst->opcode)) ||
+ !brw_abs_immediate(val.type, &val.as_brw_reg())) {
+ continue;
+ }
+ }
+
+ if (inst->src[i].negate) {
+ if ((devinfo->gen >= 8 && is_logic_op(inst->opcode)) ||
+ !brw_negate_immediate(val.type, &val.as_brw_reg())) {
+ continue;
+ }
+ }
+
switch (inst->opcode) {
case BRW_OPCODE_MOV:
- inst->src[i] = entry->src;
+ case SHADER_OPCODE_LOAD_PAYLOAD:
+ case FS_OPCODE_PACK:
+ inst->src[i] = val;
progress = true;
break;
- case SHADER_OPCODE_POW:
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
- if (brw->gen < 8)
+ /* FINISHME: Promote non-float constants and remove this. */
+ if (devinfo->gen < 8)
+ break;
+ /* fallthrough */
+ case SHADER_OPCODE_POW:
+ /* Allow constant propagation into src1 (except on Gen 6), and let
+ * constant combining promote the constant on Gen < 8.
+ *
+ * While Gen 6 MATH can take a scalar source, its source and
+ * destination offsets must be equal and we cannot ensure that.
+ */
+ if (devinfo->gen == 6)
break;
/* fallthrough */
case BRW_OPCODE_BFI1:
case BRW_OPCODE_SHR:
case BRW_OPCODE_SUBB:
if (i == 1) {
- inst->src[i] = entry->src;
+ inst->src[i] = val;
progress = true;
}
break;
case BRW_OPCODE_MACH:
case BRW_OPCODE_MUL:
+ case SHADER_OPCODE_MULH:
case BRW_OPCODE_ADD:
case BRW_OPCODE_OR:
case BRW_OPCODE_AND:
case BRW_OPCODE_XOR:
case BRW_OPCODE_ADDC:
if (i == 1) {
- inst->src[i] = entry->src;
+ inst->src[i] = val;
progress = true;
} else if (i == 0 && inst->src[1].file != IMM) {
/* Fit this constant in by commuting the operands.
* Exception: we can't do this for 32-bit integer MUL/MACH
* because it's asymmetric.
+ *
+ * The BSpec says for Broadwell that
+ *
+ * "When multiplying DW x DW, the dst cannot be accumulator."
+ *
+ * Integer MUL with a non-accumulator destination will be lowered
+ * by lower_integer_multiplication(), so don't restrict it.
*/
- if ((inst->opcode == BRW_OPCODE_MUL ||
+ if (((inst->opcode == BRW_OPCODE_MUL &&
+ inst->dst.is_accumulator()) ||
inst->opcode == BRW_OPCODE_MACH) &&
(inst->src[1].type == BRW_REGISTER_TYPE_D ||
inst->src[1].type == BRW_REGISTER_TYPE_UD))
break;
inst->src[0] = inst->src[1];
- inst->src[1] = entry->src;
+ inst->src[1] = val;
progress = true;
}
break;
case BRW_OPCODE_CMP:
case BRW_OPCODE_IF:
if (i == 1) {
- inst->src[i] = entry->src;
+ inst->src[i] = val;
progress = true;
} else if (i == 0 && inst->src[1].file != IMM) {
enum brw_conditional_mod new_cmod;
* flipping the test
*/
inst->src[0] = inst->src[1];
- inst->src[1] = entry->src;
+ inst->src[1] = val;
inst->conditional_mod = new_cmod;
progress = true;
}
case BRW_OPCODE_SEL:
if (i == 1) {
- inst->src[i] = entry->src;
+ inst->src[i] = val;
progress = true;
} else if (i == 0 && inst->src[1].file != IMM) {
inst->src[0] = inst->src[1];
- inst->src[1] = entry->src;
+ inst->src[1] = val;
/* If this was predicated, flipping operands means
* we also need to flip the predicate.
}
break;
- case SHADER_OPCODE_RCP:
- /* The hardware doesn't do math on immediate values
- * (because why are you doing that, seriously?), but
- * the correct answer is to just constant fold it
- * anyway.
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
+ case SHADER_OPCODE_TYPED_ATOMIC:
+ case SHADER_OPCODE_TYPED_SURFACE_READ:
+ case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+ /* We only propagate into the surface argument of the
+ * instruction. Everything else goes through LOAD_PAYLOAD.
*/
- assert(i == 0);
- if (inst->src[0].fixed_hw_reg.dw1.f != 0.0f) {
- inst->opcode = BRW_OPCODE_MOV;
- inst->src[0] = entry->src;
- inst->src[0].fixed_hw_reg.dw1.f = 1.0f / inst->src[0].fixed_hw_reg.dw1.f;
+ if (i == 1) {
+ inst->src[i] = val;
progress = true;
}
break;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
- inst->src[i] = entry->src;
+ case SHADER_OPCODE_BROADCAST:
+ inst->src[i] = val;
+ progress = true;
+ break;
+
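+      /* The 3-source encodings used here cannot take immediates directly;
+       * a later constant-combining pass is expected to load the value back
+       * into a register.
+       */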
+ case BRW_OPCODE_MAD:
+ case BRW_OPCODE_LRP:
+ inst->src[i] = val;
progress = true;
break;
can_propagate_from(fs_inst *inst)
{
return (inst->opcode == BRW_OPCODE_MOV &&
- inst->dst.file == GRF &&
- ((inst->src[0].file == GRF &&
- (inst->src[0].reg != inst->dst.reg ||
+ inst->dst.file == VGRF &&
+ ((inst->src[0].file == VGRF &&
+ (inst->src[0].nr != inst->dst.nr ||
inst->src[0].reg_offset != inst->dst.reg_offset)) ||
+ inst->src[0].file == ATTR ||
inst->src[0].file == UNIFORM ||
inst->src[0].file == IMM) &&
inst->src[0].type == inst->dst.type &&
- !inst->saturate &&
!inst->is_partial_write());
}
foreach_inst_in_block(fs_inst, inst, block) {
/* Try propagating into this instruction. */
for (int i = 0; i < inst->sources; i++) {
- if (inst->src[i].file != GRF)
+ if (inst->src[i].file != VGRF)
continue;
- foreach_in_list(acp_entry, entry, &acp[inst->src[i].reg % ACP_HASH_SIZE]) {
- if (try_constant_propagate(brw, inst, entry))
+ foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
+ if (try_constant_propagate(inst, entry))
progress = true;
-
- if (try_copy_propagate(inst, i, entry))
+ else if (try_copy_propagate(inst, i, entry))
progress = true;
}
}
/* kill the destination from the ACP */
- if (inst->dst.file == GRF) {
- foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.reg % ACP_HASH_SIZE]) {
+ if (inst->dst.file == VGRF) {
+ foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
if (inst->overwrites_reg(entry->dst)) {
entry->remove();
}
acp_entry *entry = ralloc(copy_prop_ctx, acp_entry);
entry->dst = inst->dst;
entry->src = inst->src[0];
+ entry->regs_written = inst->regs_written;
entry->opcode = inst->opcode;
- acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
+ entry->saturate = inst->saturate;
+ acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
} else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
- inst->dst.file == GRF) {
+ inst->dst.file == VGRF) {
+ int offset = 0;
for (int i = 0; i < inst->sources; i++) {
- if (inst->src[i].file == GRF) {
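+            /* Header sources are written with SIMD8 moves and so take one
+             * register each; the rest take exec_size / 8 registers.  offset
+             * tracks where each source lands in the destination.
+             */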
+ int effective_width = i < inst->header_size ? 8 : inst->exec_size;
+ int regs_written = effective_width / 8;
+ if (inst->src[i].file == VGRF) {
acp_entry *entry = ralloc(copy_prop_ctx, acp_entry);
entry->dst = inst->dst;
- entry->dst.reg_offset = i;
+ entry->dst.reg_offset = offset;
entry->src = inst->src[i];
+ entry->regs_written = regs_written;
entry->opcode = inst->opcode;
if (!entry->dst.equals(inst->src[i])) {
- acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
+ acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
} else {
ralloc_free(entry);
}
}
+ offset += regs_written;
}
}
}
bool
fs_visitor::opt_copy_propagate()
{
- calculate_cfg();
-
bool progress = false;
void *copy_prop_ctx = ralloc_context(NULL);
exec_list *out_acp[cfg->num_blocks];
/* First, walk through each block doing local copy propagation and getting
* the set of copies available at the end of the block.
*/
- for (int b = 0; b < cfg->num_blocks; b++) {
- bblock_t *block = cfg->blocks[b];
-
+ foreach_block (block, cfg) {
progress = opt_copy_propagate_local(copy_prop_ctx, block,
- out_acp[b]) || progress;
+ out_acp[block->num]) || progress;
}
/* Do dataflow analysis for those available copies. */
/* Next, re-run local copy propagation, this time with the set of copies
* provided by the dataflow analysis available at the start of a block.
*/
- for (int b = 0; b < cfg->num_blocks; b++) {
- bblock_t *block = cfg->blocks[b];
+ foreach_block (block, cfg) {
exec_list in_acp[ACP_HASH_SIZE];
for (int i = 0; i < dataflow.num_acp; i++) {
- if (BITSET_TEST(dataflow.bd[b].livein, i)) {
+ if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
struct acp_entry *entry = dataflow.acp[i];
- in_acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
+ in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
}
}