X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_schedule_instructions.cpp;h=40b5715cccde49caf27528331240be12026d5c60;hb=8776b1b14b229d110f283f5da8c3c36261068ede;hp=c9cd136cee839071213622feb05a015da07d9605;hpb=6148e94e26ff4b6cbba452121c76d564f25f194d;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
index c9cd136cee8..40b5715cccd 100644
--- a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
+++ b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
@@ -27,6 +27,8 @@
 #include "brw_fs.h"
 #include "brw_vec4.h"
+#include "brw_cfg.h"
+#include "brw_shader.h"
 #include "glsl/glsl_types.h"
 #include "glsl/ir_optimization.h"
@@ -421,8 +423,8 @@ public:
    void add_dep(schedule_node *before, schedule_node *after, int latency);
    void add_dep(schedule_node *before, schedule_node *after);

-   void run(exec_list *instructions);
-   void add_inst(backend_instruction *inst);
+   void run(cfg_t *cfg);
+   void add_insts_from_block(bblock_t *block);
    void compute_delay(schedule_node *node);
    virtual void calculate_deps() = 0;
    virtual schedule_node *choose_instruction_to_schedule() = 0;
@@ -440,7 +442,7 @@ public:
    virtual void update_register_pressure(backend_instruction *inst) = 0;
    virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

-   void schedule_instructions(backend_instruction *next_block_header);
+   void schedule_instructions(bblock_t *block);

    void *mem_ctx;
@@ -505,7 +507,7 @@ fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
    if (inst->dst.file == GRF)
       remaining_grf_uses[inst->dst.reg]++;

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file != GRF)
          continue;
@@ -526,7 +528,7 @@ fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
       grf_active[inst->dst.reg] = true;
    }

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file == GRF) {
          remaining_grf_uses[inst->src[i].reg]--;
          grf_active[inst->src[i].reg] = true;
@@ -547,7 +549,7 @@ fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
       benefit -= v->virtual_grf_sizes[inst->dst.reg];
    }

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file != GRF)
         continue;
@@ -624,17 +626,28 @@ schedule_node::schedule_node(backend_instruction *inst,
 }

 void
-instruction_scheduler::add_inst(backend_instruction *inst)
+instruction_scheduler::add_insts_from_block(bblock_t *block)
 {
-   schedule_node *n = new(mem_ctx) schedule_node(inst, this);
+   /* Removing the last instruction from a basic block removes the block as
+    * well, so put a NOP at the end to keep it alive.
+    */
+   if (!block->end()->is_control_flow()) {
+      backend_instruction *nop = new(mem_ctx) backend_instruction();
+      nop->opcode = BRW_OPCODE_NOP;
+      block->end()->insert_after(block, nop);
+   }
+
+   foreach_inst_in_block_safe(backend_instruction, inst, block) {
+      if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
+         continue;

-   assert(!inst->is_head_sentinel());
-   assert(!inst->is_tail_sentinel());
+      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

-   this->instructions_to_schedule++;
+      this->instructions_to_schedule++;

-   inst->remove();
-   instructions.push_tail(n);
+      inst->remove(block);
+      instructions.push_tail(n);
+   }
 }

 /** Recursive computation of the delay member of a node. */
@@ -660,7 +673,7 @@ instruction_scheduler::compute_delay(schedule_node *n)
  */
 void
 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
-		    int latency)
+                               int latency)
 {
    if (!before || !after)
       return;
@@ -669,22 +682,22 @@ instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,

    for (int i = 0; i < before->child_count; i++) {
       if (before->children[i] == after) {
-	 before->child_latency[i] = MAX2(before->child_latency[i], latency);
-	 return;
+         before->child_latency[i] = MAX2(before->child_latency[i], latency);
+         return;
       }
    }

    if (before->child_array_size <= before->child_count) {
       if (before->child_array_size < 16)
-	 before->child_array_size = 16;
+         before->child_array_size = 16;
       else
-	 before->child_array_size *= 2;
+         before->child_array_size *= 2;

       before->children = reralloc(mem_ctx, before->children,
-				  schedule_node *,
-				  before->child_array_size);
+                                  schedule_node *,
+                                  before->child_array_size);
       before->child_latency = reralloc(mem_ctx, before->child_latency,
-				       int, before->child_array_size);
+                                       int, before->child_array_size);
    }

    before->children[before->child_count] = after;
@@ -715,15 +728,15 @@ instruction_scheduler::add_barrier_deps(schedule_node *n)

    if (prev) {
       while (!prev->is_head_sentinel()) {
-	 add_dep(prev, n, 0);
-	 prev = (schedule_node *)prev->prev;
+         add_dep(prev, n, 0);
+         prev = (schedule_node *)prev->prev;
       }
    }

    if (next) {
       while (!next->is_tail_sentinel()) {
-	 add_dep(n, next, 0);
-	 next = (schedule_node *)next->next;
+         add_dep(n, next, 0);
+         next = (schedule_node *)next->next;
       }
    }
 }
@@ -734,20 +747,17 @@ instruction_scheduler::add_barrier_deps(schedule_node *n)
 bool
 fs_instruction_scheduler::is_compressed(fs_inst *inst)
 {
-   return (v->dispatch_width == 16 &&
-           !inst->force_uncompressed &&
-           !inst->force_sechalf);
+   return inst->exec_size == 16;
 }

 void
 fs_instruction_scheduler::calculate_deps()
 {
-   /* Pre-register-allocation, this tracks the last write per VGRF (so
-    * different reg_offsets within it can interfere when they shouldn't).
+   /* Pre-register-allocation, this tracks the last write per VGRF offset.
     * After register allocation, reg_offsets are gone and we track individual
     * GRF registers.
     */
-   schedule_node *last_grf_write[grf_count];
+   schedule_node *last_grf_write[grf_count * 16];
    schedule_node *last_mrf_write[BRW_MAX_MRF];
    schedule_node *last_conditional_mod[2] = { NULL, NULL };
    schedule_node *last_accumulator_write = NULL;
@@ -772,8 +782,7 @@ fs_instruction_scheduler::calculate_deps()
    memset(last_mrf_write, 0, sizeof(last_mrf_write));

    /* top-to-bottom dependencies: RAW and WAW. */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list(schedule_node, n, &instructions) {
       fs_inst *inst = (fs_inst *)n->inst;

       if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
@@ -781,18 +790,20 @@ fs_instruction_scheduler::calculate_deps()
          add_barrier_deps(n);

       /* read-after-write deps. */
-      for (int i = 0; i < 3; i++) {
-	 if (inst->src[i].file == GRF) {
+      for (int i = 0; i < inst->sources; i++) {
+         if (inst->src[i].file == GRF) {
             if (post_reg_alloc) {
-               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
+               for (int r = 0; r < inst->regs_read(v, i); r++)
                   add_dep(last_grf_write[inst->src[i].reg + r], n);
             } else {
-               add_dep(last_grf_write[inst->src[i].reg], n);
+               for (int r = 0; r < inst->regs_read(v, i); r++) {
+                  add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
+               }
             }
-	 } else if (inst->src[i].file == HW_REG &&
-		    (inst->src[i].fixed_hw_reg.file ==
-		     BRW_GENERAL_REGISTER_FILE)) {
-	    if (post_reg_alloc) {
+         } else if (inst->src[i].file == HW_REG &&
+                    (inst->src[i].fixed_hw_reg.file ==
+                     BRW_GENERAL_REGISTER_FILE)) {
+            if (post_reg_alloc) {
                int size = reg_width;
                if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                   size = 1;
@@ -803,28 +814,28 @@ fs_instruction_scheduler::calculate_deps()
             }
          } else if (inst->src[i].is_accumulator()) {
             add_dep(last_accumulator_write, n);
-	 } else if (inst->src[i].file != BAD_FILE &&
-		    inst->src[i].file != IMM &&
-		    inst->src[i].file != UNIFORM &&
+         } else if (inst->src[i].file != BAD_FILE &&
+                    inst->src[i].file != IMM &&
+                    inst->src[i].file != UNIFORM &&
                     (inst->src[i].file != HW_REG ||
                      inst->src[i].fixed_hw_reg.file != IMM)) {
-	    assert(inst->src[i].file != MRF);
-	    add_barrier_deps(n);
-	 }
+            assert(inst->src[i].file != MRF);
+            add_barrier_deps(n);
+         }
       }

       if (inst->base_mrf != -1) {
-	 for (int i = 0; i < inst->mlen; i++) {
-	    /* It looks like the MRF regs are released in the send
-	     * instruction once it's sent, not when the result comes
-	     * back.
-	     */
-	    add_dep(last_mrf_write[inst->base_mrf + i], n);
-	 }
+         for (int i = 0; i < inst->mlen; i++) {
+            /* It looks like the MRF regs are released in the send
+             * instruction once it's sent, not when the result comes
+             * back.
+             */
+            add_dep(last_mrf_write[inst->base_mrf + i], n);
+         }
       }

       if (inst->reads_flag()) {
-	 add_dep(last_conditional_mod[inst->flag_subreg], n);
+         add_dep(last_conditional_mod[inst->flag_subreg], n);
       }

       if (inst->reads_accumulator_implicitly()) {
@@ -834,29 +845,31 @@ fs_instruction_scheduler::calculate_deps()
       /* write-after-write deps. */
       if (inst->dst.file == GRF) {
          if (post_reg_alloc) {
-            for (int r = 0; r < inst->regs_written * reg_width; r++) {
+            for (int r = 0; r < inst->regs_written; r++) {
                add_dep(last_grf_write[inst->dst.reg + r], n);
                last_grf_write[inst->dst.reg + r] = n;
             }
          } else {
-            add_dep(last_grf_write[inst->dst.reg], n);
-            last_grf_write[inst->dst.reg] = n;
+            for (int r = 0; r < inst->regs_written; r++) {
+               add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
+               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
+            }
          }
       } else if (inst->dst.file == MRF) {
-	 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
-
-	 add_dep(last_mrf_write[reg], n);
-	 last_mrf_write[reg] = n;
-	 if (is_compressed(inst)) {
-	    if (inst->dst.reg & BRW_MRF_COMPR4)
-	       reg += 4;
-	    else
-	       reg++;
-	    add_dep(last_mrf_write[reg], n);
-	    last_mrf_write[reg] = n;
-	 }
+         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
+
+         add_dep(last_mrf_write[reg], n);
+         last_mrf_write[reg] = n;
+         if (is_compressed(inst)) {
+            if (inst->dst.reg & BRW_MRF_COMPR4)
+               reg += 4;
+            else
+               reg++;
+            add_dep(last_mrf_write[reg], n);
+            last_mrf_write[reg] = n;
+         }
       } else if (inst->dst.file == HW_REG &&
-		 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
+                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
          if (post_reg_alloc) {
             for (int r = 0; r < reg_width; r++)
                last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
@@ -868,22 +881,22 @@ fs_instruction_scheduler::calculate_deps()
          last_accumulator_write = n;
       } else if (inst->dst.file != BAD_FILE &&
                  !inst->dst.is_null()) {
-	 add_barrier_deps(n);
+         add_barrier_deps(n);
       }

       if (inst->mlen > 0 && inst->base_mrf != -1) {
-	 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
-	    add_dep(last_mrf_write[inst->base_mrf + i], n);
-	    last_mrf_write[inst->base_mrf + i] = n;
-	 }
+         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
+            add_dep(last_mrf_write[inst->base_mrf + i], n);
+            last_mrf_write[inst->base_mrf + i] = n;
+         }
       }

       if (inst->writes_flag()) {
-	 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
-	 last_conditional_mod[inst->flag_subreg] = n;
+         add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
+         last_conditional_mod[inst->flag_subreg] = n;
       }

-      if (inst->writes_accumulator_implicitly(v->brw->gen) &&
+      if (inst->writes_accumulator_implicitly(v->brw) &&
          !inst->dst.is_accumulator()) {
          add_dep(last_accumulator_write, n);
          last_accumulator_write = n;
@@ -900,24 +913,26 @@ fs_instruction_scheduler::calculate_deps()
    exec_node *node;
    exec_node *prev;
    for (node = instructions.get_tail(), prev = node->prev;
-	!node->is_head_sentinel();
-	node = prev, prev = node->prev) {
+        !node->is_head_sentinel();
+        node = prev, prev = node->prev) {
       schedule_node *n = (schedule_node *)node;
       fs_inst *inst = (fs_inst *)n->inst;

       /* write-after-read deps. */
-      for (int i = 0; i < 3; i++) {
-	 if (inst->src[i].file == GRF) {
+      for (int i = 0; i < inst->sources; i++) {
+         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
-               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
+               for (int r = 0; r < inst->regs_read(v, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].reg + r]);
            } else {
-               add_dep(n, last_grf_write[inst->src[i].reg]);
+               for (int r = 0; r < inst->regs_read(v, i); r++) {
+                  add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
+               }
            }
-	 } else if (inst->src[i].file == HW_REG &&
-		    (inst->src[i].fixed_hw_reg.file ==
-		     BRW_GENERAL_REGISTER_FILE)) {
-	    if (post_reg_alloc) {
+         } else if (inst->src[i].file == HW_REG &&
+                    (inst->src[i].fixed_hw_reg.file ==
+                     BRW_GENERAL_REGISTER_FILE)) {
+            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
@@ -929,27 +944,27 @@ fs_instruction_scheduler::calculate_deps()
          } else if (inst->src[i].is_accumulator()) {
             add_dep(n, last_accumulator_write);
          } else if (inst->src[i].file != BAD_FILE &&
-		    inst->src[i].file != IMM &&
-		    inst->src[i].file != UNIFORM &&
+                    inst->src[i].file != IMM &&
+                    inst->src[i].file != UNIFORM &&
                     (inst->src[i].file != HW_REG ||
                      inst->src[i].fixed_hw_reg.file != IMM)) {
-	    assert(inst->src[i].file != MRF);
-	    add_barrier_deps(n);
-	 }
+            assert(inst->src[i].file != MRF);
+            add_barrier_deps(n);
+         }
       }

       if (inst->base_mrf != -1) {
-	 for (int i = 0; i < inst->mlen; i++) {
-	    /* It looks like the MRF regs are released in the send
-	     * instruction once it's sent, not when the result comes
-	     * back.
-	     */
-	    add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
-	 }
+         for (int i = 0; i < inst->mlen; i++) {
+            /* It looks like the MRF regs are released in the send
+             * instruction once it's sent, not when the result comes
+             * back.
+             */
+            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
+         }
       }

       if (inst->reads_flag()) {
-	 add_dep(n, last_conditional_mod[inst->flag_subreg]);
+         add_dep(n, last_conditional_mod[inst->flag_subreg]);
       }

       if (inst->reads_accumulator_implicitly()) {
@@ -961,26 +976,28 @@ fs_instruction_scheduler::calculate_deps()
        */
       if (inst->dst.file == GRF) {
          if (post_reg_alloc) {
-            for (int r = 0; r < inst->regs_written * reg_width; r++)
+            for (int r = 0; r < inst->regs_written; r++)
                last_grf_write[inst->dst.reg + r] = n;
          } else {
-            last_grf_write[inst->dst.reg] = n;
+            for (int r = 0; r < inst->regs_written; r++) {
+               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
+            }
          }
       } else if (inst->dst.file == MRF) {
-	 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
+         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;

-	 last_mrf_write[reg] = n;
+         last_mrf_write[reg] = n;

-	 if (is_compressed(inst)) {
-	    if (inst->dst.reg & BRW_MRF_COMPR4)
-	       reg += 4;
-	    else
-	       reg++;
+         if (is_compressed(inst)) {
+            if (inst->dst.reg & BRW_MRF_COMPR4)
+               reg += 4;
+            else
+               reg++;

-	    last_mrf_write[reg] = n;
-	 }
+            last_mrf_write[reg] = n;
+         }
       } else if (inst->dst.file == HW_REG &&
-		 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
+                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
@@ -991,20 +1008,20 @@ fs_instruction_scheduler::calculate_deps()
          last_accumulator_write = n;
       } else if (inst->dst.file != BAD_FILE &&
                  !inst->dst.is_null()) {
-	 add_barrier_deps(n);
+         add_barrier_deps(n);
       }

       if (inst->mlen > 0 && inst->base_mrf != -1) {
-	 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
-	    last_mrf_write[inst->base_mrf + i] = n;
-	 }
+         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
+            last_mrf_write[inst->base_mrf + i] = n;
+         }
       }

       if (inst->writes_flag()) {
-	 last_conditional_mod[inst->flag_subreg] = n;
+         last_conditional_mod[inst->flag_subreg] = n;
       }

-      if (inst->writes_accumulator_implicitly(v->brw->gen)) {
+      if (inst->writes_accumulator_implicitly(v->brw)) {
         last_accumulator_write = n;
      }
   }
@@ -1037,8 +1054,7 @@ vec4_instruction_scheduler::calculate_deps()
    memset(last_mrf_write, 0, sizeof(last_mrf_write));

    /* top-to-bottom dependencies: RAW and WAW. */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (inst->has_side_effects())
@@ -1115,7 +1131,7 @@ vec4_instruction_scheduler::calculate_deps()
          last_conditional_mod = n;
       }

-      if (inst->writes_accumulator_implicitly(v->brw->gen) &&
+      if (inst->writes_accumulator_implicitly(v->brw) &&
          !inst->dst.is_accumulator()) {
          add_dep(last_accumulator_write, n);
          last_accumulator_write = n;
@@ -1201,7 +1217,7 @@ vec4_instruction_scheduler::calculate_deps()
          last_conditional_mod = n;
       }

-      if (inst->writes_accumulator_implicitly(v->brw->gen)) {
+      if (inst->writes_accumulator_implicitly(v->brw)) {
         last_accumulator_write = n;
      }
   }
@@ -1210,6 +1226,7 @@ vec4_instruction_scheduler::calculate_deps()
 schedule_node *
 fs_instruction_scheduler::choose_instruction_to_schedule()
 {
+   struct brw_context *brw = v->brw;
    schedule_node *chosen = NULL;

    if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
@@ -1218,9 +1235,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
       /* Of the instructions ready to execute or the closest to
        * being ready, choose the oldest one.
       */
-      foreach_list(node, &instructions) {
-	 schedule_node *n = (schedule_node *)node;
-
+      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen || n->unblocked_time < chosen_time) {
            chosen = n;
            chosen_time = n->unblocked_time;
@@ -1233,8 +1248,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
-      foreach_list(node, &instructions) {
-	 schedule_node *n = (schedule_node *)node;
+      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
@@ -1280,7 +1294,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
          * then the MRFs for the next SEND, then the next SEND, then the
          * MRFs, etc., without ever consuming the results of a send.
          */
-         if (v->brw->gen < 7) {
+         if (brw->gen < 7) {
           fs_inst *chosen_inst = (fs_inst *)chosen->inst;

           /* We use regs_written > 1 as our test for the kind of send
@@ -1288,7 +1302,8 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
            * single-result send is probably actually reducing register
            * pressure.
            */
-            if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
+            if (inst->regs_written <= inst->dst.width / 8 &&
+                chosen_inst->regs_written > chosen_inst->dst.width / 8) {
              chosen = n;
              continue;
           } else if (inst->regs_written > chosen_inst->regs_written) {
@@ -1328,9 +1343,7 @@ vec4_instruction_scheduler::choose_instruction_to_schedule()
    /* Of the instructions ready to execute or the closest to being ready,
     * choose the oldest one.
     */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
-
+   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
@@ -1357,15 +1370,16 @@ vec4_instruction_scheduler::issue_time(backend_instruction *inst)
 }

 void
-instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
+instruction_scheduler::schedule_instructions(bblock_t *block)
 {
+   struct brw_context *brw = bv->brw;
+   backend_instruction *inst = block->end();
    time = 0;

    /* Remove non-DAG heads from the list. */
-   foreach_list_safe(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list_safe(schedule_node, n, &instructions) {
       if (n->parent_count != 0)
-	 n->remove();
+         n->remove();
    }

    unsigned cand_generation = 1;
@@ -1375,7 +1389,7 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea
      /* Schedule this instruction. */
      assert(chosen);
      chosen->remove();
-      next_block_header->insert_before(chosen->inst);
+      inst->insert_before(block, chosen->inst);
      instructions_to_schedule--;
      update_register_pressure(chosen->inst);

@@ -1402,10 +1416,10 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
-	 schedule_node *child = chosen->children[i];
+         schedule_node *child = chosen->children[i];

-	 child->unblocked_time = MAX2(child->unblocked_time,
-				      time + chosen->child_latency[i]);
+         child->unblocked_time = MAX2(child->unblocked_time,
+                                      time + chosen->child_latency[i]);

         if (debug) {
            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
@@ -1413,13 +1427,13 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea
        }

        child->cand_generation = cand_generation;
-	 child->parent_count--;
-	 if (child->parent_count == 0) {
+         child->parent_count--;
+         if (child->parent_count == 0) {
          if (debug) {
             fprintf(stderr, "\t\tnow available\n");
          }
-	    instructions.push_head(child);
-	 }
+            instructions.push_head(child);
+         }
      }
      cand_generation++;

@@ -1428,26 +1442,23 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea
      * the next math instruction isn't going to make progress until the first
      * is done.
      */
-      if (chosen->inst->is_math()) {
-	 foreach_list(node, &instructions) {
-	    schedule_node *n = (schedule_node *)node;
-
-	    if (n->inst->is_math())
-	       n->unblocked_time = MAX2(n->unblocked_time,
-					time + chosen->latency);
-	 }
+      if (brw->gen < 6 && chosen->inst->is_math()) {
+         foreach_in_list(schedule_node, n, &instructions) {
+            if (n->inst->is_math())
+               n->unblocked_time = MAX2(n->unblocked_time,
+                                        time + chosen->latency);
+         }
      }
   }

+   if (block->end()->opcode == BRW_OPCODE_NOP)
+      block->end()->remove(block);
   assert(instructions_to_schedule == 0);
 }

 void
-instruction_scheduler::run(exec_list *all_instructions)
+instruction_scheduler::run(cfg_t *cfg)
 {
-   backend_instruction *next_block_header =
-      (backend_instruction *)all_instructions->head;
-
   if (debug) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
@@ -1458,29 +1469,24 @@ instruction_scheduler::run(exec_list *all_instructions)
    * scheduling.
    */
   if (remaining_grf_uses) {
-      foreach_list(node, all_instructions) {
-         count_remaining_grf_uses((backend_instruction *)node);
+      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
+         count_remaining_grf_uses(inst);
      }
   }

-   while (!next_block_header->is_tail_sentinel()) {
-      /* Add things to be scheduled until we get to a new BB. */
-      while (!next_block_header->is_tail_sentinel()) {
-	 backend_instruction *inst = next_block_header;
-	 next_block_header = (backend_instruction *)next_block_header->next;
+   foreach_block(block, cfg) {
+      if (block->end_ip - block->start_ip <= 1)
+         continue;
+
+      add_insts_from_block(block);

-	 add_inst(inst);
-	 if (inst->is_control_flow())
-	    break;
-      }
      calculate_deps();

-      foreach_list(node, &instructions) {
-         schedule_node *n = (schedule_node *)node;
+      foreach_in_list(schedule_node, n, &instructions) {
         compute_delay(n);
      }

-      schedule_instructions(next_block_header);
+      schedule_instructions(block);
   }

   if (debug) {
@@ -1500,7 +1506,7 @@ fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
      grf_count = virtual_grf_count;

   fs_instruction_scheduler sched(this, grf_count, mode);
-   sched.run(&instructions);
+   sched.run(cfg);

   if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
      fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
@@ -1514,7 +1520,7 @@ void
 vec4_visitor::opt_schedule_instructions()
 {
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
-   sched.run(&instructions);
+   sched.run(cfg);

   if (unlikely(debug_flag)) {
      fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
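
Notes on the change follow.

The structural heart of this patch is that run() no longer walks one flat
exec_list, peeling instructions off until it hits control flow; it now walks
the CFG and schedules each basic block as an independent problem, which is
why the NOP keep-alive appears in add_insts_from_block() and is stripped
again at the end of schedule_instructions(). A rough sketch of the new
driver shape, using hypothetical stand-in types rather than the real
cfg_t/bblock_t interface from brw_cfg.h:

   #include <vector>

   struct Block { int start_ip, end_ip; /* instructions elided */ };
   struct Cfg   { std::vector<Block *> blocks; };   // stand-in for cfg_t

   void run_sketch(Cfg *cfg) {
      for (Block *block : cfg->blocks) {
         /* Blocks holding at most one instruction have nothing to
          * reorder, mirroring the "end_ip - start_ip <= 1" early-out.
          */
         if (block->end_ip - block->start_ip <= 1)
            continue;

         /* add_insts_from_block(): detach every non-control-flow
          * instruction into a DAG node, leaving a NOP behind so the
          * emptied block is not deleted out from under the scheduler.
          * calculate_deps(): build RAW/WAW/WAR edges, block-local only.
          * schedule_instructions(block): re-emit each chosen node before
          * the block's end, then strip the keep-alive NOP again.
          */
      }
   }

Because control-flow instructions are never added to the DAG, nothing in
calculate_deps() has to reason about branches: every dependence is local to
the block being scheduled.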
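
The other substantive change is the grf_count * 16 last-write table: before
register allocation, last_grf_write is now indexed by reg * 16 + reg_offset
rather than by VGRF alone, so accesses only serialize against the register
slices they actually overlap. A minimal stand-alone illustration of that
indexing scheme (toy Node type, and the hard-coded 16-offset stride taken
from the patch; this is not Mesa's API):

   #include <cassert>
   #include <vector>

   struct Node;   // stand-in for schedule_node

   /* One last-writer slot per (VGRF, reg_offset) pair, as in
    * "schedule_node *last_grf_write[grf_count * 16]" above.
    */
   struct LastGrfWrite {
      static const int STRIDE = 16;   // patch assumes reg_offset < 16
      std::vector<Node *> slots;      // value-initialized to null

      explicit LastGrfWrite(int grf_count) : slots(grf_count * STRIDE) {}

      Node *&at(int vgrf, int reg_offset) {
         assert(reg_offset < STRIDE);
         return slots[vgrf * STRIDE + reg_offset];
      }
   };

A source that reads n registers starting at reg_offset then walks
at(vgrf, reg_offset + r) for r in [0, n), adding one dependence per slot --
the same loop shape as the regs_read()/regs_written walks in
calculate_deps() above.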
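
Finally, although the diff only retargets it at a single block,
schedule_instructions() is a textbook greedy list scheduler: pick a ready
node, advance the clock, then release children whose last unscheduled
parent just issued. Stripped to its skeleton (toy types; only the
oldest-ready policy used by SCHEDULE_POST is shown):

   #include <algorithm>
   #include <cassert>
   #include <cstddef>
   #include <vector>

   struct Node {
      int unblocked_time = 0;          // earliest cycle this node may issue
      int parent_count = 0;            // unscheduled predecessors left
      std::vector<Node *> children;    // DAG successors
      std::vector<int> child_latency;  // latency along each DAG edge
   };

   /* 'ready' starts as the DAG heads (parent_count == 0); 'total' is the
    * number of nodes in the block's DAG.
    */
   void schedule_sketch(std::vector<Node *> ready, int total) {
      int time = 0;
      while (total-- > 0) {
         assert(!ready.empty());
         // Oldest-ready policy: the node that can issue soonest wins.
         auto it = std::min_element(ready.begin(), ready.end(),
            [](const Node *a, const Node *b) {
               return a->unblocked_time < b->unblocked_time;
            });
         Node *chosen = *it;
         ready.erase(it);

         time = std::max(time, chosen->unblocked_time);
         /* ... emit chosen's instruction before the block's end ... */

         // Release successors: bump each child's earliest start by the
         // edge latency, and unblock it once all parents have issued.
         for (std::size_t i = 0; i < chosen->children.size(); i++) {
            Node *child = chosen->children[i];
            child->unblocked_time = std::max(child->unblocked_time,
                                             time + chosen->child_latency[i]);
            if (--child->parent_count == 0)
               ready.push_back(child);
         }
      }
   }

The real scheduler layers policies on top of this loop -- the
register-pressure heuristics of the pre-RA modes and the pre-gen6 math
serialization above -- but the DAG mechanics are exactly these.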