#include "brw_fs.h"
#include "brw_vec4.h"
+#include "brw_cfg.h"
+#include "brw_shader.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
static bool debug = false;
+class instruction_scheduler;
+
class schedule_node : public exec_node
{
public:
- schedule_node(backend_instruction *inst, const struct brw_context *brw)
- {
- this->inst = inst;
- this->child_array_size = 0;
- this->children = NULL;
- this->child_latency = NULL;
- this->child_count = 0;
- this->parent_count = 0;
- this->unblocked_time = 0;
-
- /* We can't measure Gen6 timings directly but expect them to be much
- * closer to Gen7 than Gen4.
- */
- if (brw->gen >= 6)
- set_latency_gen7(brw->is_haswell);
- else
- set_latency_gen4();
- }
-
+ schedule_node(backend_instruction *inst, instruction_scheduler *sched);
void set_latency_gen4();
void set_latency_gen7(bool is_haswell);
int child_array_size;
int unblocked_time;
int latency;
+
+ /**
+ * The iteration of pushing groups of children onto the candidates list
+ * in which this node became a candidate.
+ */
+ unsigned cand_generation;
+
+ /**
+ * The instruction's latency plus the maximum delay of any of its
+ * children, or just the issue_time if it's a leaf node.
+ */
+ int delay;
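+
+ /* For example (illustrative numbers): a leaf MOV with issue_time 2 has
+ * delay == 2; a texture fetch feeding it, with latency 200, then gets
+ * delay == 200 + 2. See instruction_scheduler::compute_delay().
+ */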
};
void
case BRW_OPCODE_MAD:
/* 2 cycles
* (since the last two src operands are in different register banks):
- * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
*
* 3 cycles on IVB, 4 on HSW
* (since the last two src operands are in the same register bank):
- * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
*
* 18 cycles on IVB, 16 on HSW
* (since the last two src operands are in different register banks):
- * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
* mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
*
* 20 cycles on IVB, 18 on HSW
* (since the last two src operands are in the same register bank):
- * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
* mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
*/
case BRW_OPCODE_LRP:
/* 2 cycles
* (since the last two src operands are in different register banks):
- * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
*
* 3 cycles on IVB, 4 on HSW
* (since the last two src operands are in the same register bank):
- * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
*
* 16 cycles on IVB, 14 on HSW
* (since the last two src operands are in different register banks):
- * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
* mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
*
* 16 cycles
* (since the last two src operands are in the same register bank):
- * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+ * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
* mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
*/
latency = 200;
break;
+ case SHADER_OPCODE_GEN7_SCRATCH_READ:
+ /* Testing a load from offset 0, that had been previously written:
+ *
+ * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
+ * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
+ *
+ * The cycles spent seemed to be grouped around 40-50 (as low as 38),
+ * then around 140. Presumably this is cache hit vs miss.
+ */
+ latency = 50;
+ break;
+
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ case SHADER_OPCODE_TYPED_ATOMIC:
+ /* Test code:
+ * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
+ * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
+ * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
+ * send(8) g4<1>ud g112<8,8,1>ud
+ * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
+ *
+ * Running it 100 times as a fragment shader on a 128x128 quad
+ * gives an average latency of 13867 cycles per atomic op,
+ * standard deviation 3%. Note that this is a rather
+ * pessimistic estimate; the actual latency in cases with few
+ * collisions between threads and favorable pipelining has been
+ * seen to be reduced by a factor of 100.
+ */
+ latency = 14000;
+ break;
+
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
+ case SHADER_OPCODE_TYPED_SURFACE_READ:
+ case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+ /* Test code:
+ * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
+ * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
+ * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
+ * send(8) g4<1>UD g112<8,8,1>UD
+ * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
+ * .
+ * . [repeats 8 times]
+ * .
+ * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
+ * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
+ * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
+ * send(8) g4<1>UD g112<8,8,1>UD
+ * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
+ *
+ * Running it 100 times as a fragment shader on a 128x128 quad
+ * gives an average latency of 583 cycles per surface read,
+ * standard deviation 0.9%.
+ */
+ latency = is_haswell ? 300 : 600;
+ break;
+
default:
/* 2 cycles:
* mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
class instruction_scheduler {
public:
- instruction_scheduler(backend_visitor *v, int grf_count, bool post_reg_alloc)
+ instruction_scheduler(backend_shader *s, int grf_count,
+ instruction_scheduler_mode mode)
{
- this->bv = v;
- this->mem_ctx = ralloc_context(v->mem_ctx);
+ this->bs = s;
+ this->mem_ctx = ralloc_context(NULL);
this->grf_count = grf_count;
this->instructions.make_empty();
this->instructions_to_schedule = 0;
- this->post_reg_alloc = post_reg_alloc;
+ this->post_reg_alloc = (mode == SCHEDULE_POST);
+ this->mode = mode;
this->time = 0;
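+
+ /* The register-pressure bookkeeping below is only needed for the
+ * pre-regalloc modes; post-regalloc scheduling optimizes purely for
+ * latency.
+ */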
+ if (!post_reg_alloc) {
+ this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count);
+ this->grf_active = rzalloc_array(mem_ctx, bool, grf_count);
+ } else {
+ this->remaining_grf_uses = NULL;
+ this->grf_active = NULL;
+ }
}
~instruction_scheduler()
void add_dep(schedule_node *before, schedule_node *after, int latency);
void add_dep(schedule_node *before, schedule_node *after);
- void run(exec_list *instructions);
- void add_inst(backend_instruction *inst);
+ void run(cfg_t *cfg);
+ void add_insts_from_block(bblock_t *block);
+ void compute_delay(schedule_node *node);
virtual void calculate_deps() = 0;
virtual schedule_node *choose_instruction_to_schedule() = 0;
* Returns how many cycles it takes the instruction to issue.
*
* Instructions in gen hardware are handled one SIMD4 vector at a time,
- * with 1 cycle per vector dispatched. Thus 8-wide pixel shaders take 2
- * cycles to dispatch and 16-wide (compressed) instructions take 4.
+ * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
+ * cycles to dispatch and SIMD16 (compressed) instructions take 4.
*/
virtual int issue_time(backend_instruction *inst) = 0;
- void schedule_instructions(backend_instruction *next_block_header);
+ virtual void count_remaining_grf_uses(backend_instruction *inst) = 0;
+ virtual void update_register_pressure(backend_instruction *inst) = 0;
+ virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
+
+ void schedule_instructions(bblock_t *block);
void *mem_ctx;
int grf_count;
int time;
exec_list instructions;
- backend_visitor *bv;
+ backend_shader *bs;
+
+ instruction_scheduler_mode mode;
+
+ /**
+ * Number of instructions left to schedule that reference each vgrf.
+ *
+ * Used so that we can prefer scheduling instructions that will end the
+ * live intervals of multiple variables, to reduce register pressure.
+ */
+ int *remaining_grf_uses;
+
+ /**
+ * Tracks whether each VGRF has had an instruction scheduled that uses it.
+ *
+ * This is used to estimate whether scheduling a new instruction will
+ * increase register pressure.
+ */
+ bool *grf_active;
};
class fs_instruction_scheduler : public instruction_scheduler
{
public:
- fs_instruction_scheduler(fs_visitor *v, int grf_count, bool post_reg_alloc);
+ fs_instruction_scheduler(fs_visitor *v, int grf_count,
+ instruction_scheduler_mode mode);
void calculate_deps();
bool is_compressed(fs_inst *inst);
schedule_node *choose_instruction_to_schedule();
int issue_time(backend_instruction *inst);
fs_visitor *v;
+
+ void count_remaining_grf_uses(backend_instruction *inst);
+ void update_register_pressure(backend_instruction *inst);
+ int get_register_pressure_benefit(backend_instruction *inst);
};
fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v,
int grf_count,
- bool post_reg_alloc)
- : instruction_scheduler(v, grf_count, post_reg_alloc),
+ instruction_scheduler_mode mode)
+ : instruction_scheduler(v, grf_count, mode),
v(v)
{
}
+void
+fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
+{
+ fs_inst *inst = (fs_inst *)be;
+
+ if (!remaining_grf_uses)
+ return;
+
+ if (inst->dst.file == GRF)
+ remaining_grf_uses[inst->dst.reg]++;
+
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file != GRF)
+ continue;
+
+ remaining_grf_uses[inst->src[i].reg]++;
+ }
+}
+
+void
+fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
+{
+ fs_inst *inst = (fs_inst *)be;
+
+ if (!remaining_grf_uses)
+ return;
+
+ if (inst->dst.file == GRF) {
+ remaining_grf_uses[inst->dst.reg]--;
+ grf_active[inst->dst.reg] = true;
+ }
+
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file == GRF) {
+ remaining_grf_uses[inst->src[i].reg]--;
+ grf_active[inst->src[i].reg] = true;
+ }
+ }
+}
+
+int
+fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
+{
+ fs_inst *inst = (fs_inst *)be;
+ int benefit = 0;
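+
+ /* Sign convention: a VGRF whose last remaining use this is contributes
+ * +size (scheduling this instruction ends its live range); a VGRF with
+ * no use scheduled yet contributes -size (scheduling this instruction
+ * would start a new live range).
+ */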
+
+ if (inst->dst.file == GRF) {
+ if (remaining_grf_uses[inst->dst.reg] == 1)
+ benefit += v->alloc.sizes[inst->dst.reg];
+ if (!grf_active[inst->dst.reg])
+ benefit -= v->alloc.sizes[inst->dst.reg];
+ }
+
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file != GRF)
+ continue;
+
+ if (remaining_grf_uses[inst->src[i].reg] == 1)
+ benefit += v->alloc.sizes[inst->src[i].reg];
+ if (!grf_active[inst->src[i].reg])
+ benefit -= v->alloc.sizes[inst->src[i].reg];
+ }
+
+ return benefit;
+}
+
class vec4_instruction_scheduler : public instruction_scheduler
{
public:
schedule_node *choose_instruction_to_schedule();
int issue_time(backend_instruction *inst);
vec4_visitor *v;
+
+ void count_remaining_grf_uses(backend_instruction *inst);
+ void update_register_pressure(backend_instruction *inst);
+ int get_register_pressure_benefit(backend_instruction *inst);
};
vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
int grf_count)
- : instruction_scheduler(v, grf_count, true),
+ : instruction_scheduler(v, grf_count, SCHEDULE_POST),
v(v)
{
}
void
-instruction_scheduler::add_inst(backend_instruction *inst)
+vec4_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
{
- schedule_node *n = new(mem_ctx) schedule_node(inst, bv->brw);
+}
- assert(!inst->is_head_sentinel());
- assert(!inst->is_tail_sentinel());
+void
+vec4_instruction_scheduler::update_register_pressure(backend_instruction *be)
+{
+}
- this->instructions_to_schedule++;
+int
+vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
+{
+ return 0;
+}
- inst->remove();
- instructions.push_tail(n);
+schedule_node::schedule_node(backend_instruction *inst,
+ instruction_scheduler *sched)
+{
+ const struct brw_device_info *devinfo = sched->bs->devinfo;
+
+ this->inst = inst;
+ this->child_array_size = 0;
+ this->children = NULL;
+ this->child_latency = NULL;
+ this->child_count = 0;
+ this->parent_count = 0;
+ this->unblocked_time = 0;
+ this->cand_generation = 0;
+ this->delay = 0;
+
+ /* Before register allocation we schedule only to reduce register
+ * pressure, so per-instruction latencies are irrelevant and default to 1.
+ * After register allocation we use measured latencies; we can't measure
+ * Gen6 timings directly but expect them to be much closer to Gen7 than
+ * Gen4.
+ */
+ if (!sched->post_reg_alloc)
+ this->latency = 1;
+ else if (devinfo->gen >= 6)
+ set_latency_gen7(devinfo->is_haswell);
+ else
+ set_latency_gen4();
+}
+
+void
+instruction_scheduler::add_insts_from_block(bblock_t *block)
+{
+ /* Removing the last instruction from a basic block removes the block as
+ * well, so put a NOP at the end to keep it alive.
+ */
+ if (!block->end()->is_control_flow()) {
+ backend_instruction *nop = new(mem_ctx) backend_instruction();
+ nop->opcode = BRW_OPCODE_NOP;
+ block->end()->insert_after(block, nop);
+ }
+
+ foreach_inst_in_block_safe(backend_instruction, inst, block) {
+ if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
+ continue;
+
+ schedule_node *n = new(mem_ctx) schedule_node(inst, this);
+
+ this->instructions_to_schedule++;
+
+ inst->remove(block);
+ instructions.push_tail(n);
+ }
+}
+
+/** Recursive computation of the delay member of a node. */
+void
+instruction_scheduler::compute_delay(schedule_node *n)
+{
+ if (!n->child_count) {
+ n->delay = issue_time(n->inst);
+ } else {
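+ /* delay == 0 marks a child not yet visited; issue_time() is always at
+ * least one cycle, so any computed delay is nonzero.
+ */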
+ for (int i = 0; i < n->child_count; i++) {
+ if (!n->children[i]->delay)
+ compute_delay(n->children[i]);
+ n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
+ }
+ }
}
/**
*/
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
- int latency)
+ int latency)
{
if (!before || !after)
return;
for (int i = 0; i < before->child_count; i++) {
if (before->children[i] == after) {
- before->child_latency[i] = MAX2(before->child_latency[i], latency);
- return;
+ before->child_latency[i] = MAX2(before->child_latency[i], latency);
+ return;
}
}
if (before->child_array_size <= before->child_count) {
if (before->child_array_size < 16)
- before->child_array_size = 16;
+ before->child_array_size = 16;
else
- before->child_array_size *= 2;
+ before->child_array_size *= 2;
before->children = reralloc(mem_ctx, before->children,
- schedule_node *,
- before->child_array_size);
+ schedule_node *,
+ before->child_array_size);
before->child_latency = reralloc(mem_ctx, before->child_latency,
- int, before->child_array_size);
+ int, before->child_array_size);
}
before->children[before->child_count] = after;
if (prev) {
while (!prev->is_head_sentinel()) {
- add_dep(prev, n, 0);
- prev = (schedule_node *)prev->prev;
+ add_dep(prev, n, 0);
+ prev = (schedule_node *)prev->prev;
}
}
if (next) {
while (!next->is_tail_sentinel()) {
- add_dep(n, next, 0);
- next = (schedule_node *)next->next;
+ add_dep(n, next, 0);
+ next = (schedule_node *)next->next;
}
}
}
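+/* A compressed (SIMD16) instruction is executed as two SIMD8 halves on the
+ * hardware generations that use MRFs, so an MRF write by one covers a pair
+ * of MRF registers; see the BRW_MRF_COMPR4 handling in calculate_deps().
+ */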
bool
fs_instruction_scheduler::is_compressed(fs_inst *inst)
{
- return (v->dispatch_width == 16 &&
- !inst->force_uncompressed &&
- !inst->force_sechalf);
+ return inst->exec_size == 16;
}
void
fs_instruction_scheduler::calculate_deps()
{
- /* Pre-register-allocation, this tracks the last write per VGRF (so
- * different reg_offsets within it can interfere when they shouldn't).
+ /* Pre-register-allocation, this tracks the last write per VGRF offset.
* After register allocation, reg_offsets are gone and we track individual
* GRF registers.
*/
- schedule_node *last_grf_write[grf_count];
+ schedule_node *last_grf_write[grf_count * 16];
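+ /* Each VGRF gets 16 slots, one per reg_offset, to match the
+ * (reg * 16 + reg_offset) indexing used below.
+ */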
schedule_node *last_mrf_write[BRW_MAX_MRF];
schedule_node *last_conditional_mod[2] = { NULL, NULL };
+ schedule_node *last_accumulator_write = NULL;
/* Fixed HW registers are assumed to be separate from the virtual
* GRFs, so they can be tracked separately. We don't really write
* to fixed GRFs much, so don't bother tracking them on a more
memset(last_mrf_write, 0, sizeof(last_mrf_write));
/* top-to-bottom dependencies: RAW and WAW. */
- foreach_list(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
+ foreach_in_list(schedule_node, n, &instructions) {
fs_inst *inst = (fs_inst *)n->inst;
- if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT)
+ if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
+ inst->has_side_effects())
add_barrier_deps(n);
/* read-after-write deps. */
- for (int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF) {
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file == GRF) {
if (post_reg_alloc) {
- for (int r = 0; r < reg_width; r++)
+ for (int r = 0; r < inst->regs_read(i); r++)
add_dep(last_grf_write[inst->src[i].reg + r], n);
} else {
- add_dep(last_grf_write[inst->src[i].reg], n);
+ for (int r = 0; r < inst->regs_read(i); r++) {
+ add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
+ }
}
- } else if (inst->src[i].file == HW_REG &&
- (inst->src[i].fixed_hw_reg.file ==
- BRW_GENERAL_REGISTER_FILE)) {
- if (post_reg_alloc) {
+ } else if (inst->src[i].file == HW_REG &&
+ (inst->src[i].fixed_hw_reg.file ==
+ BRW_GENERAL_REGISTER_FILE)) {
+ if (post_reg_alloc) {
int size = reg_width;
if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
size = 1;
} else {
add_dep(last_fixed_grf_write, n);
}
- } else if (inst->src[i].file != BAD_FILE &&
- inst->src[i].file != IMM &&
- inst->src[i].file != UNIFORM) {
- assert(inst->src[i].file != MRF);
- add_barrier_deps(n);
- }
+ } else if (inst->src[i].is_accumulator()) {
+ add_dep(last_accumulator_write, n);
+ } else if (inst->src[i].file != BAD_FILE &&
+ inst->src[i].file != IMM &&
+ inst->src[i].file != UNIFORM &&
+ (inst->src[i].file != HW_REG ||
+ inst->src[i].fixed_hw_reg.file != IMM)) {
+ assert(inst->src[i].file != MRF);
+ add_barrier_deps(n);
+ }
}
- for (int i = 0; i < inst->mlen; i++) {
- /* It looks like the MRF regs are released in the send
- * instruction once it's sent, not when the result comes
- * back.
- */
- add_dep(last_mrf_write[inst->base_mrf + i], n);
+ if (inst->base_mrf != -1) {
+ for (int i = 0; i < inst->mlen; i++) {
+ /* It looks like the MRF regs are released in the send
+ * instruction once it's sent, not when the result comes
+ * back.
+ */
+ add_dep(last_mrf_write[inst->base_mrf + i], n);
+ }
+ }
+
+ if (inst->reads_flag()) {
+ add_dep(last_conditional_mod[inst->flag_subreg], n);
}
- if (inst->predicate) {
- add_dep(last_conditional_mod[inst->flag_subreg], n);
+ if (inst->reads_accumulator_implicitly()) {
+ add_dep(last_accumulator_write, n);
}
/* write-after-write deps. */
if (inst->dst.file == GRF) {
if (post_reg_alloc) {
- for (int r = 0; r < inst->regs_written * reg_width; r++) {
+ for (int r = 0; r < inst->regs_written; r++) {
add_dep(last_grf_write[inst->dst.reg + r], n);
last_grf_write[inst->dst.reg + r] = n;
}
} else {
- add_dep(last_grf_write[inst->dst.reg], n);
- last_grf_write[inst->dst.reg] = n;
+ for (int r = 0; r < inst->regs_written; r++) {
+ add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
+ last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
+ }
}
} else if (inst->dst.file == MRF) {
- int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
-
- add_dep(last_mrf_write[reg], n);
- last_mrf_write[reg] = n;
- if (is_compressed(inst)) {
- if (inst->dst.reg & BRW_MRF_COMPR4)
- reg += 4;
- else
- reg++;
- add_dep(last_mrf_write[reg], n);
- last_mrf_write[reg] = n;
- }
+ int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
+
+ add_dep(last_mrf_write[reg], n);
+ last_mrf_write[reg] = n;
+ if (is_compressed(inst)) {
+ if (inst->dst.reg & BRW_MRF_COMPR4)
+ reg += 4;
+ else
+ reg++;
+ add_dep(last_mrf_write[reg], n);
+ last_mrf_write[reg] = n;
+ }
} else if (inst->dst.file == HW_REG &&
- inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
+ inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
if (post_reg_alloc) {
for (int r = 0; r < reg_width; r++)
last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
} else {
last_fixed_grf_write = n;
}
- } else if (inst->dst.file != BAD_FILE) {
- add_barrier_deps(n);
+ } else if (inst->dst.is_accumulator()) {
+ add_dep(last_accumulator_write, n);
+ last_accumulator_write = n;
+ } else if (inst->dst.file != BAD_FILE &&
+ !inst->dst.is_null()) {
+ add_barrier_deps(n);
}
- if (inst->mlen > 0) {
- for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
- add_dep(last_mrf_write[inst->base_mrf + i], n);
- last_mrf_write[inst->base_mrf + i] = n;
- }
+ if (inst->mlen > 0 && inst->base_mrf != -1) {
+ for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
+ add_dep(last_mrf_write[inst->base_mrf + i], n);
+ last_mrf_write[inst->base_mrf + i] = n;
+ }
}
- /* Treat FS_OPCODE_MOV_DISPATCH_TO_FLAGS as though it had a
- * conditional_mod, because it sets the flag register.
- */
- if (inst->conditional_mod ||
- inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
- add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
- last_conditional_mod[inst->flag_subreg] = n;
+ if (inst->writes_flag()) {
+ add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
+ last_conditional_mod[inst->flag_subreg] = n;
+ }
+
+ if (inst->writes_accumulator_implicitly(v->devinfo) &&
+ !inst->dst.is_accumulator()) {
+ add_dep(last_accumulator_write, n);
+ last_accumulator_write = n;
}
}
memset(last_grf_write, 0, sizeof(last_grf_write));
memset(last_mrf_write, 0, sizeof(last_mrf_write));
memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
+ last_accumulator_write = NULL;
last_fixed_grf_write = NULL;
exec_node *node;
exec_node *prev;
for (node = instructions.get_tail(), prev = node->prev;
- !node->is_head_sentinel();
- node = prev, prev = node->prev) {
+ !node->is_head_sentinel();
+ node = prev, prev = node->prev) {
schedule_node *n = (schedule_node *)node;
fs_inst *inst = (fs_inst *)n->inst;
/* write-after-read deps. */
- for (int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF) {
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file == GRF) {
if (post_reg_alloc) {
- for (int r = 0; r < reg_width; r++)
+ for (int r = 0; r < inst->regs_read(i); r++)
add_dep(n, last_grf_write[inst->src[i].reg + r]);
} else {
- add_dep(n, last_grf_write[inst->src[i].reg]);
+ for (int r = 0; r < inst->regs_read(i); r++) {
+ add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
+ }
}
- } else if (inst->src[i].file == HW_REG &&
- (inst->src[i].fixed_hw_reg.file ==
- BRW_GENERAL_REGISTER_FILE)) {
- if (post_reg_alloc) {
+ } else if (inst->src[i].file == HW_REG &&
+ (inst->src[i].fixed_hw_reg.file ==
+ BRW_GENERAL_REGISTER_FILE)) {
+ if (post_reg_alloc) {
int size = reg_width;
if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
size = 1;
} else {
add_dep(n, last_fixed_grf_write);
}
+ } else if (inst->src[i].is_accumulator()) {
+ add_dep(n, last_accumulator_write);
} else if (inst->src[i].file != BAD_FILE &&
- inst->src[i].file != IMM &&
- inst->src[i].file != UNIFORM) {
- assert(inst->src[i].file != MRF);
- add_barrier_deps(n);
- }
+ inst->src[i].file != IMM &&
+ inst->src[i].file != UNIFORM &&
+ (inst->src[i].file != HW_REG ||
+ inst->src[i].fixed_hw_reg.file != IMM)) {
+ assert(inst->src[i].file != MRF);
+ add_barrier_deps(n);
+ }
}
- for (int i = 0; i < inst->mlen; i++) {
- /* It looks like the MRF regs are released in the send
- * instruction once it's sent, not when the result comes
- * back.
- */
- add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
+ if (inst->base_mrf != -1) {
+ for (int i = 0; i < inst->mlen; i++) {
+ /* It looks like the MRF regs are released in the send
+ * instruction once it's sent, not when the result comes
+ * back.
+ */
+ add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
+ }
}
- if (inst->predicate) {
- add_dep(n, last_conditional_mod[inst->flag_subreg]);
+ if (inst->reads_flag()) {
+ add_dep(n, last_conditional_mod[inst->flag_subreg]);
+ }
+
+ if (inst->reads_accumulator_implicitly()) {
+ add_dep(n, last_accumulator_write);
}
/* Update the things this instruction wrote, so earlier reads
*/
if (inst->dst.file == GRF) {
if (post_reg_alloc) {
- for (int r = 0; r < inst->regs_written * reg_width; r++)
+ for (int r = 0; r < inst->regs_written; r++)
last_grf_write[inst->dst.reg + r] = n;
} else {
- last_grf_write[inst->dst.reg] = n;
+ for (int r = 0; r < inst->regs_written; r++) {
+ last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
+ }
}
} else if (inst->dst.file == MRF) {
- int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
+ int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
- last_mrf_write[reg] = n;
+ last_mrf_write[reg] = n;
- if (is_compressed(inst)) {
- if (inst->dst.reg & BRW_MRF_COMPR4)
- reg += 4;
- else
- reg++;
+ if (is_compressed(inst)) {
+ if (inst->dst.reg & BRW_MRF_COMPR4)
+ reg += 4;
+ else
+ reg++;
- last_mrf_write[reg] = n;
- }
+ last_mrf_write[reg] = n;
+ }
} else if (inst->dst.file == HW_REG &&
- inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
+ inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
if (post_reg_alloc) {
for (int r = 0; r < reg_width; r++)
last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
} else {
last_fixed_grf_write = n;
}
- } else if (inst->dst.file != BAD_FILE) {
- add_barrier_deps(n);
+ } else if (inst->dst.is_accumulator()) {
+ last_accumulator_write = n;
+ } else if (inst->dst.file != BAD_FILE &&
+ !inst->dst.is_null()) {
+ add_barrier_deps(n);
}
- if (inst->mlen > 0) {
- for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
- last_mrf_write[inst->base_mrf + i] = n;
- }
+ if (inst->mlen > 0 && inst->base_mrf != -1) {
+ for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
+ last_mrf_write[inst->base_mrf + i] = n;
+ }
}
- /* Treat FS_OPCODE_MOV_DISPATCH_TO_FLAGS as though it had a
- * conditional_mod, because it sets the flag register.
- */
- if (inst->conditional_mod ||
- inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
- last_conditional_mod[inst->flag_subreg] = n;
+ if (inst->writes_flag()) {
+ last_conditional_mod[inst->flag_subreg] = n;
+ }
+
+ if (inst->writes_accumulator_implicitly(v->devinfo)) {
+ last_accumulator_write = n;
}
}
}
schedule_node *last_grf_write[grf_count];
schedule_node *last_mrf_write[BRW_MAX_MRF];
schedule_node *last_conditional_mod = NULL;
+ schedule_node *last_accumulator_write = NULL;
/* Fixed HW registers are assumed to be separate from the virtual
* GRFs, so they can be tracked separately. We don't really write
* to fixed GRFs much, so don't bother tracking them on a more
memset(last_mrf_write, 0, sizeof(last_mrf_write));
/* top-to-bottom dependencies: RAW and WAW. */
- foreach_list(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
+ foreach_in_list(schedule_node, n, &instructions) {
vec4_instruction *inst = (vec4_instruction *)n->inst;
+ if (inst->has_side_effects())
+ add_barrier_deps(n);
+
/* read-after-write deps. */
for (int i = 0; i < 3; i++) {
if (inst->src[i].file == GRF) {
- add_dep(last_grf_write[inst->src[i].reg], n);
+ for (unsigned j = 0; j < inst->regs_read(i); ++j)
+ add_dep(last_grf_write[inst->src[i].reg + j], n);
} else if (inst->src[i].file == HW_REG &&
(inst->src[i].fixed_hw_reg.file ==
BRW_GENERAL_REGISTER_FILE)) {
add_dep(last_fixed_grf_write, n);
+ } else if (inst->src[i].is_accumulator()) {
+ assert(last_accumulator_write);
+ add_dep(last_accumulator_write, n);
} else if (inst->src[i].file != BAD_FILE &&
inst->src[i].file != IMM &&
- inst->src[i].file != UNIFORM) {
+ inst->src[i].file != UNIFORM &&
+ (inst->src[i].file != HW_REG ||
+ inst->src[i].fixed_hw_reg.file != IMM)) {
/* No reads from MRF, and ATTR is already translated away */
assert(inst->src[i].file != MRF &&
inst->src[i].file != ATTR);
add_barrier_deps(n);
}
}
- for (int i = 0; i < inst->mlen; i++) {
- /* It looks like the MRF regs are released in the send
- * instruction once it's sent, not when the result comes
- * back.
- */
- add_dep(last_mrf_write[inst->base_mrf + i], n);
+ if (!inst->is_send_from_grf()) {
+ for (int i = 0; i < inst->mlen; i++) {
+ /* It looks like the MRF regs are released in the send
+ * instruction once it's sent, not when the result comes
+ * back.
+ */
+ add_dep(last_mrf_write[inst->base_mrf + i], n);
+ }
}
- if (inst->predicate) {
+ if (inst->reads_flag()) {
assert(last_conditional_mod);
add_dep(last_conditional_mod, n);
}
+ if (inst->reads_accumulator_implicitly()) {
+ assert(last_accumulator_write);
+ add_dep(last_accumulator_write, n);
+ }
+
/* write-after-write deps. */
if (inst->dst.file == GRF) {
- add_dep(last_grf_write[inst->dst.reg], n);
- last_grf_write[inst->dst.reg] = n;
+ for (unsigned j = 0; j < inst->regs_written; ++j) {
+ add_dep(last_grf_write[inst->dst.reg + j], n);
+ last_grf_write[inst->dst.reg + j] = n;
+ }
} else if (inst->dst.file == MRF) {
add_dep(last_mrf_write[inst->dst.reg], n);
last_mrf_write[inst->dst.reg] = n;
} else if (inst->dst.file == HW_REG &&
inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
last_fixed_grf_write = n;
- } else if (inst->dst.file != BAD_FILE) {
+ } else if (inst->dst.is_accumulator()) {
+ add_dep(last_accumulator_write, n);
+ last_accumulator_write = n;
+ } else if (inst->dst.file != BAD_FILE &&
+ !inst->dst.is_null()) {
add_barrier_deps(n);
}
- if (inst->mlen > 0) {
+ if (inst->mlen > 0 && !inst->is_send_from_grf()) {
for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
add_dep(last_mrf_write[inst->base_mrf + i], n);
last_mrf_write[inst->base_mrf + i] = n;
}
}
- if (inst->conditional_mod) {
+ if (inst->writes_flag()) {
add_dep(last_conditional_mod, n, 0);
last_conditional_mod = n;
}
+
+ if (inst->writes_accumulator_implicitly(v->devinfo) &&
+ !inst->dst.is_accumulator()) {
+ add_dep(last_accumulator_write, n);
+ last_accumulator_write = n;
+ }
}
/* bottom-to-top dependencies: WAR */
memset(last_grf_write, 0, sizeof(last_grf_write));
memset(last_mrf_write, 0, sizeof(last_mrf_write));
last_conditional_mod = NULL;
+ last_accumulator_write = NULL;
last_fixed_grf_write = NULL;
exec_node *node;
/* write-after-read deps. */
for (int i = 0; i < 3; i++) {
if (inst->src[i].file == GRF) {
- add_dep(n, last_grf_write[inst->src[i].reg]);
+ for (unsigned j = 0; j < inst->regs_read(i); ++j)
+ add_dep(n, last_grf_write[inst->src[i].reg + j]);
} else if (inst->src[i].file == HW_REG &&
(inst->src[i].fixed_hw_reg.file ==
BRW_GENERAL_REGISTER_FILE)) {
add_dep(n, last_fixed_grf_write);
+ } else if (inst->src[i].is_accumulator()) {
+ add_dep(n, last_accumulator_write);
} else if (inst->src[i].file != BAD_FILE &&
inst->src[i].file != IMM &&
- inst->src[i].file != UNIFORM) {
+ inst->src[i].file != UNIFORM &&
+ (inst->src[i].file != HW_REG ||
+ inst->src[i].fixed_hw_reg.file != IMM)) {
assert(inst->src[i].file != MRF &&
inst->src[i].file != ATTR);
add_barrier_deps(n);
}
}
- for (int i = 0; i < inst->mlen; i++) {
- /* It looks like the MRF regs are released in the send
- * instruction once it's sent, not when the result comes
- * back.
- */
- add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
+ if (!inst->is_send_from_grf()) {
+ for (int i = 0; i < inst->mlen; i++) {
+ /* It looks like the MRF regs are released in the send
+ * instruction once it's sent, not when the result comes
+ * back.
+ */
+ add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
+ }
}
- if (inst->predicate) {
+ if (inst->reads_flag()) {
add_dep(n, last_conditional_mod);
}
+ if (inst->reads_accumulator_implicitly()) {
+ add_dep(n, last_accumulator_write);
+ }
+
/* Update the things this instruction wrote, so earlier reads
* can mark this as WAR dependency.
*/
if (inst->dst.file == GRF) {
- last_grf_write[inst->dst.reg] = n;
+ for (unsigned j = 0; j < inst->regs_written; ++j)
+ last_grf_write[inst->dst.reg + j] = n;
} else if (inst->dst.file == MRF) {
last_mrf_write[inst->dst.reg] = n;
} else if (inst->dst.file == HW_REG &&
inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
last_fixed_grf_write = n;
- } else if (inst->dst.file != BAD_FILE) {
+ } else if (inst->dst.is_accumulator()) {
+ last_accumulator_write = n;
+ } else if (inst->dst.file != BAD_FILE &&
+ !inst->dst.is_null()) {
add_barrier_deps(n);
}
- if (inst->mlen > 0) {
+ if (inst->mlen > 0 && !inst->is_send_from_grf()) {
for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
last_mrf_write[inst->base_mrf + i] = n;
}
}
- if (inst->conditional_mod) {
+ if (inst->writes_flag()) {
last_conditional_mod = n;
}
+
+ if (inst->writes_accumulator_implicitly(v->devinfo)) {
+ last_accumulator_write = n;
+ }
}
}
{
schedule_node *chosen = NULL;
- if (post_reg_alloc) {
+ if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
int chosen_time = 0;
- /* Of the instructions closest ready to execute or the closest to
+ /* Of the instructions ready to execute or the closest to
* being ready, choose the oldest one.
*/
- foreach_list(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
-
+ foreach_in_list(schedule_node, n, &instructions) {
if (!chosen || n->unblocked_time < chosen_time) {
chosen = n;
chosen_time = n->unblocked_time;
} else {
/* Before register allocation, we don't care about the latencies of
* instructions. All we care about is reducing live intervals of
- * variables so that we can avoid register spilling, or get 16-wide
+ * variables so that we can avoid register spilling, or get SIMD16
* shaders which naturally do a better job of hiding instruction
* latency.
- *
- * To do so, schedule our instructions in a roughly LIFO/depth-first
- * order: when new instructions become available as a result of
- * scheduling something, choose those first so that our result
- * hopefully is consumed quickly.
- *
- * The exception is messages that generate more than one result
- * register (AKA texturing). In those cases, the LIFO search would
- * normally tend to choose them quickly (because scheduling the
- * previous message not only unblocked the children using its result,
- * but also the MRF setup for the next sampler message, which in turn
- * unblocks the next sampler message).
*/
- for (schedule_node *node = (schedule_node *)instructions.get_tail();
- node != instructions.get_head()->prev;
- node = (schedule_node *)node->prev) {
- schedule_node *n = (schedule_node *)node;
+ foreach_in_list(schedule_node, n, &instructions) {
fs_inst *inst = (fs_inst *)n->inst;
- chosen = n;
- if (inst->regs_written <= 1)
- break;
+ if (!chosen) {
+ chosen = n;
+ continue;
+ }
+
+ /* Most important: If we can definitely reduce register pressure, do
+ * so immediately.
+ */
+ int register_pressure_benefit = get_register_pressure_benefit(n->inst);
+ int chosen_register_pressure_benefit =
+ get_register_pressure_benefit(chosen->inst);
+
+ if (register_pressure_benefit > 0 &&
+ register_pressure_benefit > chosen_register_pressure_benefit) {
+ chosen = n;
+ continue;
+ } else if (chosen_register_pressure_benefit > 0 &&
+ (register_pressure_benefit <
+ chosen_register_pressure_benefit)) {
+ continue;
+ }
+
+ if (mode == SCHEDULE_PRE_LIFO) {
+ /* Prefer instructions that recently became available for
+ * scheduling. These are the things that are most likely to
+ * (eventually) make a variable dead and reduce register pressure.
+ * Typical register pressure estimates don't work for us because
+ * most of our pressure comes from texturing, where no single
+ * instruction to schedule will make a vec4 value dead.
+ */
+ if (n->cand_generation > chosen->cand_generation) {
+ chosen = n;
+ continue;
+ } else if (n->cand_generation < chosen->cand_generation) {
+ continue;
+ }
+
+ /* On MRF-using chips, prefer non-SEND instructions. If we don't
+ * do this, then because we prefer instructions that just became
+ * candidates, we'll end up in a pattern of scheduling a SEND,
+ * then the MRFs for the next SEND, then the next SEND, then the
+ * MRFs, etc., without ever consuming the results of a send.
+ */
+ if (v->devinfo->gen < 7) {
+ fs_inst *chosen_inst = (fs_inst *)chosen->inst;
+
+ /* We use regs_written > 1 as our test for the kind of send
+ * instruction to avoid -- only sends generate many regs, and a
+ * single-result send is probably actually reducing register
+ * pressure.
+ */
+ if (inst->regs_written <= inst->dst.width / 8 &&
+ chosen_inst->regs_written > chosen_inst->dst.width / 8) {
+ chosen = n;
+ continue;
+ } else if (inst->regs_written > chosen_inst->regs_written) {
+ continue;
+ }
+ }
+ }
+
+ /* For instructions pushed on the cands list at the same time, prefer
+ * the one with the highest delay to the end of the program. Its values
+ * are the most likely to be consumable first (such as
+ * for a large tree of lowered UBO loads, which appear reversed in
+ * the instruction stream with respect to when they can be consumed).
+ */
+ if (n->delay > chosen->delay) {
+ chosen = n;
+ continue;
+ } else if (n->delay < chosen->delay) {
+ continue;
+ }
+
+ /* If all other metrics are equal, we prefer the first instruction in
+ * the list (program execution order).
+ */
}
}
/* Of the instructions ready to execute or the closest to being ready,
* choose the oldest one.
*/
- foreach_list(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
-
+ foreach_in_list(schedule_node, n, &instructions) {
if (!chosen || n->unblocked_time < chosen_time) {
chosen = n;
chosen_time = n->unblocked_time;
}
void
-instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
+instruction_scheduler::schedule_instructions(bblock_t *block)
{
+ const struct brw_device_info *devinfo = bs->devinfo;
+ backend_instruction *inst = block->end();
time = 0;
/* Remove non-DAG heads from the list. */
- foreach_list_safe(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
+ foreach_in_list_safe(schedule_node, n, &instructions) {
if (n->parent_count != 0)
- n->remove();
+ n->remove();
}
+ unsigned cand_generation = 1;
while (!instructions.is_empty()) {
schedule_node *chosen = choose_instruction_to_schedule();
/* Schedule this instruction. */
assert(chosen);
chosen->remove();
- next_block_header->insert_before(chosen->inst);
+ inst->insert_before(block, chosen->inst);
instructions_to_schedule--;
+ update_register_pressure(chosen->inst);
/* Update the clock for how soon an instruction could start after the
* chosen one.
time = MAX2(time, chosen->unblocked_time);
if (debug) {
- printf("clock %4d, scheduled: ", time);
- bv->dump_instruction(chosen->inst);
+ fprintf(stderr, "clock %4d, scheduled: ", time);
+ bs->dump_instruction(chosen->inst);
}
/* Now that we've scheduled a new instruction, some of its
* be scheduled. Update the children's unblocked time for this
* DAG edge as we do so.
*/
- for (int i = 0; i < chosen->child_count; i++) {
- schedule_node *child = chosen->children[i];
+ for (int i = chosen->child_count - 1; i >= 0; i--) {
+ schedule_node *child = chosen->children[i];
+
+ child->unblocked_time = MAX2(child->unblocked_time,
+ time + chosen->child_latency[i]);
- child->unblocked_time = MAX2(child->unblocked_time,
- time + chosen->child_latency[i]);
+ if (debug) {
+ fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
+ bs->dump_instruction(child->inst);
+ }
- child->parent_count--;
- if (child->parent_count == 0) {
+ child->cand_generation = cand_generation;
+ child->parent_count--;
+ if (child->parent_count == 0) {
if (debug) {
- printf("now available: ");
- bv->dump_instruction(child->inst);
+ fprintf(stderr, "\t\tnow available\n");
}
- instructions.push_tail(child);
- }
+ instructions.push_head(child);
+ }
}
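+
+ /* All children unblocked by this instruction share a generation number,
+ * which SCHEDULE_PRE_LIFO uses to prefer the most recently unblocked
+ * nodes.
+ */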
+ cand_generation++;
/* Shared resource: the mathbox. There's one mathbox per EU on Gen6+
* but it's more limited pre-gen6, so if we send something off to it then
* the next math instruction isn't going to make progress until the first
* is done.
*/
- if (chosen->inst->is_math()) {
- foreach_list(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
-
- if (n->inst->is_math())
- n->unblocked_time = MAX2(n->unblocked_time,
- time + chosen->latency);
- }
+ if (devinfo->gen < 6 && chosen->inst->is_math()) {
+ foreach_in_list(schedule_node, n, &instructions) {
+ if (n->inst->is_math())
+ n->unblocked_time = MAX2(n->unblocked_time,
+ time + chosen->latency);
+ }
}
}
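+
+ /* Remove the keep-alive NOP that add_insts_from_block() appended, now
+ * that the block's real instructions are back in place.
+ */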
+ if (block->end()->opcode == BRW_OPCODE_NOP)
+ block->end()->remove(block);
assert(instructions_to_schedule == 0);
}
void
-instruction_scheduler::run(exec_list *all_instructions)
+instruction_scheduler::run(cfg_t *cfg)
{
- backend_instruction *next_block_header =
- (backend_instruction *)all_instructions->head;
-
if (debug) {
- printf("\nInstructions before scheduling (reg_alloc %d)\n", post_reg_alloc);
- bv->dump_instructions();
+ fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
+ post_reg_alloc);
+ bs->dump_instructions();
}
- while (!next_block_header->is_tail_sentinel()) {
- /* Add things to be scheduled until we get to a new BB. */
- while (!next_block_header->is_tail_sentinel()) {
- backend_instruction *inst = next_block_header;
- next_block_header = (backend_instruction *)next_block_header->next;
-
- add_inst(inst);
- if (inst->is_control_flow())
- break;
+ /* Populate the remaining GRF uses array to improve the pre-regalloc
+ * scheduling.
+ */
+ if (remaining_grf_uses) {
+ foreach_block_and_inst(block, backend_instruction, inst, cfg) {
+ count_remaining_grf_uses(inst);
}
+ }
+
+ foreach_block(block, cfg) {
+ if (block->end_ip - block->start_ip <= 1)
+ continue;
+
+ add_insts_from_block(block);
+
calculate_deps();
- schedule_instructions(next_block_header);
+
+ foreach_in_list(schedule_node, n, &instructions) {
+ compute_delay(n);
+ }
+
+ schedule_instructions(block);
}
if (debug) {
- printf("\nInstructions after scheduling (reg_alloc %d)\n", post_reg_alloc);
- bv->dump_instructions();
+ fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
+ post_reg_alloc);
+ bs->dump_instructions();
}
}
void
-fs_visitor::schedule_instructions(bool post_reg_alloc)
+fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
{
int grf_count;
- if (post_reg_alloc)
+ if (mode == SCHEDULE_POST)
grf_count = grf_used;
else
- grf_count = virtual_grf_count;
+ grf_count = alloc.count;
- fs_instruction_scheduler sched(this, grf_count, post_reg_alloc);
- sched.run(&instructions);
+ fs_instruction_scheduler sched(this, grf_count, mode);
+ sched.run(cfg);
- if (unlikely(INTEL_DEBUG & DEBUG_WM) && post_reg_alloc) {
- printf("fs%d estimated execution time: %d cycles\n",
- dispatch_width, sched.time);
+ if (unlikely(debug_enabled) && mode == SCHEDULE_POST) {
+ fprintf(stderr, "%s%d estimated execution time: %d cycles\n",
+ stage_abbrev, dispatch_width, sched.time);
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}
void
vec4_visitor::opt_schedule_instructions()
{
vec4_instruction_scheduler sched(this, prog_data->total_grf);
- sched.run(&instructions);
+ sched.run(cfg);
- if (unlikely(debug_flag)) {
- printf("vec4 estimated execution time: %d cycles\n", sched.time);
+ if (unlikely(debug_enabled)) {
+ fprintf(stderr, "%s estimated execution time: %d cycles\n",
+ stage_abbrev, sched.time);
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}