X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_schedule_instructions.cpp;h=40b5715cccde49caf27528331240be12026d5c60;hb=8776b1b14b229d110f283f5da8c3c36261068ede;hp=a61bbab613b95b8176a730eaf890f5302d54863c;hpb=746e3e3b3ad20a29ee6de64d663d2dc11deac06e;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
index a61bbab613b..40b5715cccd 100644
--- a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
+++ b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp
@@ -27,6 +27,8 @@
 #include "brw_fs.h"
 #include "brw_vec4.h"
+#include "brw_cfg.h"
+#include "brw_shader.h"
 #include "glsl/glsl_types.h"
 #include "glsl/ir_optimization.h"
@@ -132,20 +134,20 @@ schedule_node::set_latency_gen7(bool is_haswell)
    case BRW_OPCODE_MAD:
       /* 2 cycles
        * (since the last two src operands are in different register banks):
-       * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
        *
        * 3 cycles on IVB, 4 on HSW
        * (since the last two src operands are in the same register bank):
-       * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
        *
        * 18 cycles on IVB, 16 on HSW
        * (since the last two src operands are in different register banks):
-       * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
        * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
        *
        * 20 cycles on IVB, 18 on HSW
        * (since the last two src operands are in the same register bank):
-       * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
        * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
        */
@@ -158,20 +160,20 @@ schedule_node::set_latency_gen7(bool is_haswell)
    case BRW_OPCODE_LRP:
       /* 2 cycles
        * (since the last two src operands are in different register banks):
-       * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
        *
        * 3 cycles on IVB, 4 on HSW
        * (since the last two src operands are in the same register bank):
-       * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
        *
        * 16 cycles on IVB, 14 on HSW
        * (since the last two src operands are in different register banks):
-       * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
        * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
        *
        * 16 cycles
        * (since the last two src operands are in the same register bank):
-       * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q };
+       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
        * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
        */
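The comment fixes in the two hunks above are purely notational. Gen register regions are written <vstride,width,hstride>, and the full-row float operands in these latency measurements are <4,4,1> regions (rows of four elements, stepping four elements per row, one per column), so <4,1,1> was a mis-transcribed region description rather than a different experiment. As a reading aid, one corrected line broken down (the annotation is mine, not part of the patch):

    mad(8)  g4<1>F  g2.2<4,4,1>F.x  g2<4,4,1>F.x  g3.1<4,4,1>F.x
    /*      dst     src0            src1          src2
     * <4,4,1>F.x: a four-wide row of floats with the .x component
     * selected in align16 mode. */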
@@ -421,8 +423,8 @@ public:
    void add_dep(schedule_node *before, schedule_node *after, int latency);
    void add_dep(schedule_node *before, schedule_node *after);

-   void run(exec_list *instructions);
-   void add_inst(backend_instruction *inst);
+   void run(cfg_t *cfg);
+   void add_insts_from_block(bblock_t *block);
    void compute_delay(schedule_node *node);
    virtual void calculate_deps() = 0;
    virtual schedule_node *choose_instruction_to_schedule() = 0;
@@ -440,7 +442,7 @@ public:
    virtual void update_register_pressure(backend_instruction *inst) = 0;
    virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

-   void schedule_instructions(backend_instruction *next_block_header);
+   void schedule_instructions(bblock_t *block);

    void *mem_ctx;
@@ -505,7 +507,7 @@ fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be)
    if (inst->dst.file == GRF)
       remaining_grf_uses[inst->dst.reg]++;

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file != GRF)
          continue;
@@ -526,7 +528,7 @@ fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
       grf_active[inst->dst.reg] = true;
    }

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file == GRF) {
          remaining_grf_uses[inst->src[i].reg]--;
          grf_active[inst->src[i].reg] = true;
@@ -547,7 +549,7 @@ fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
       benefit -= v->virtual_grf_sizes[inst->dst.reg];
    }

-   for (int i = 0; i < 3; i++) {
+   for (int i = 0; i < inst->sources; i++) {
       if (inst->src[i].file != GRF)
          continue;
@@ -624,17 +626,28 @@ schedule_node::schedule_node(backend_instruction *inst,
 }

 void
-instruction_scheduler::add_inst(backend_instruction *inst)
+instruction_scheduler::add_insts_from_block(bblock_t *block)
 {
-   schedule_node *n = new(mem_ctx) schedule_node(inst, this);
+   /* Removing the last instruction from a basic block removes the block as
+    * well, so put a NOP at the end to keep it alive.
+    */
+   if (!block->end()->is_control_flow()) {
+      backend_instruction *nop = new(mem_ctx) backend_instruction();
+      nop->opcode = BRW_OPCODE_NOP;
+      block->end()->insert_after(block, nop);
+   }
+
+   foreach_inst_in_block_safe(backend_instruction, inst, block) {
+      if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
+         continue;

-   assert(!inst->is_head_sentinel());
-   assert(!inst->is_tail_sentinel());
+      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

-   this->instructions_to_schedule++;
+      this->instructions_to_schedule++;

-   inst->remove();
-   instructions.push_tail(n);
+      inst->remove(block);
+      instructions.push_tail(n);
+   }
 }

 /** Recursive computation of the delay member of a node.
  */
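For orientation, since the hunk below only brushes against it: compute_delay() fills in each node's delay, the critical-path length from that node to the end of the block's dependency DAG, which choose_instruction_to_schedule() later uses to prefer instructions on long chains. A minimal sketch of the recursion, assuming the children/child_latency arrays maintained by add_dep() (a sketch only, not patch code):

    void
    instruction_scheduler::compute_delay(schedule_node *n)
    {
       if (!n->child_count) {
          /* A leaf's delay is just its own issue cost. */
          n->delay = issue_time(n->inst);
       } else {
          for (int i = 0; i < n->child_count; i++) {
             /* Longest path through any child, counting the edge latency. */
             n->delay = MAX2(n->delay,
                             n->children[i]->delay + n->child_latency[i]);
          }
       }
    }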
@@ -660,7 +673,7 @@ instruction_scheduler::compute_delay(schedule_node *n)
  */
 void
 instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
-			       int latency)
+                              int latency)
 {
    if (!before || !after)
       return;
@@ -669,22 +682,22 @@ instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,

    for (int i = 0; i < before->child_count; i++) {
       if (before->children[i] == after) {
-	 before->child_latency[i] = MAX2(before->child_latency[i], latency);
-	 return;
+         before->child_latency[i] = MAX2(before->child_latency[i], latency);
+         return;
       }
    }

    if (before->child_array_size <= before->child_count) {
       if (before->child_array_size < 16)
-	 before->child_array_size = 16;
+         before->child_array_size = 16;
       else
-	 before->child_array_size *= 2;
+         before->child_array_size *= 2;

       before->children = reralloc(mem_ctx, before->children,
-				  schedule_node *,
-				  before->child_array_size);
+                                  schedule_node *,
+                                  before->child_array_size);
       before->child_latency = reralloc(mem_ctx, before->child_latency,
-				       int, before->child_array_size);
+                                       int, before->child_array_size);
    }

    before->children[before->child_count] = after;
@@ -715,15 +728,15 @@ instruction_scheduler::add_barrier_deps(schedule_node *n)

    if (prev) {
       while (!prev->is_head_sentinel()) {
-	 add_dep(prev, n, 0);
-	 prev = (schedule_node *)prev->prev;
+         add_dep(prev, n, 0);
+         prev = (schedule_node *)prev->prev;
       }
    }

    if (next) {
       while (!next->is_tail_sentinel()) {
-	 add_dep(n, next, 0);
-	 next = (schedule_node *)next->next;
+         add_dep(n, next, 0);
+         next = (schedule_node *)next->next;
       }
    }
 }
@@ -734,22 +747,20 @@ instruction_scheduler::add_barrier_deps(schedule_node *n)
 bool
 fs_instruction_scheduler::is_compressed(fs_inst *inst)
 {
-   return (v->dispatch_width == 16 &&
-           !inst->force_uncompressed &&
-           !inst->force_sechalf);
+   return inst->exec_size == 16;
 }

 void
 fs_instruction_scheduler::calculate_deps()
 {
-   /* Pre-register-allocation, this tracks the last write per VGRF (so
-    * different reg_offsets within it can interfere when they shouldn't).
+   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
-   schedule_node *last_grf_write[grf_count];
+   schedule_node *last_grf_write[grf_count * 16];
    schedule_node *last_mrf_write[BRW_MAX_MRF];
    schedule_node *last_conditional_mod[2] = { NULL, NULL };
+   schedule_node *last_accumulator_write = NULL;
    /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
@@ -771,8 +782,7 @@ fs_instruction_scheduler::calculate_deps()
    memset(last_mrf_write, 0, sizeof(last_mrf_write));

    /* top-to-bottom dependencies: RAW and WAW. */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list(schedule_node, n, &instructions) {
       fs_inst *inst = (fs_inst *)n->inst;

       if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
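The resizing of last_grf_write above is the structural change the rewritten loops below build on: before register allocation there is now one slot per (VGRF, reg_offset) pair instead of one per VGRF, so writes to different offsets of the same virtual register no longer serialize against each other. The indexing convention, pulled out as a hypothetical helper for clarity (not in the patch; it assumes no pre-allocation VGRF spans more than 16 registers):

    static inline int
    grf_write_slot(int reg, int reg_offset, int r)
    {
       /* 16 slots per VGRF, one per reg_offset. */
       return reg * 16 + reg_offset + r;
    }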
@@ -780,18 +790,20 @@ fs_instruction_scheduler::calculate_deps()
          add_barrier_deps(n);

       /* read-after-write deps. */
-      for (int i = 0; i < 3; i++) {
-	 if (inst->src[i].file == GRF) {
+      for (int i = 0; i < inst->sources; i++) {
+         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
-               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
+               for (int r = 0; r < inst->regs_read(v, i); r++)
                  add_dep(last_grf_write[inst->src[i].reg + r], n);
            } else {
-               add_dep(last_grf_write[inst->src[i].reg], n);
+               for (int r = 0; r < inst->regs_read(v, i); r++) {
+                  add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n);
+               }
            }
-	 } else if (inst->src[i].file == HW_REG &&
-		    (inst->src[i].fixed_hw_reg.file ==
-		     BRW_GENERAL_REGISTER_FILE)) {
-	    if (post_reg_alloc) {
+         } else if (inst->src[i].file == HW_REG &&
+                    (inst->src[i].fixed_hw_reg.file ==
+                     BRW_GENERAL_REGISTER_FILE)) {
+            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
@@ -800,74 +812,94 @@ fs_instruction_scheduler::calculate_deps()
            } else {
               add_dep(last_fixed_grf_write, n);
            }
-	 } else if (inst->src[i].file != BAD_FILE &&
-		    inst->src[i].file != IMM &&
-		    inst->src[i].file != UNIFORM) {
-	    assert(inst->src[i].file != MRF);
-	    add_barrier_deps(n);
-	 }
+         } else if (inst->src[i].is_accumulator()) {
+            add_dep(last_accumulator_write, n);
+         } else if (inst->src[i].file != BAD_FILE &&
+                    inst->src[i].file != IMM &&
+                    inst->src[i].file != UNIFORM &&
+                    (inst->src[i].file != HW_REG ||
+                     inst->src[i].fixed_hw_reg.file != IMM)) {
+            assert(inst->src[i].file != MRF);
+            add_barrier_deps(n);
+         }
       }

       if (inst->base_mrf != -1) {
-	 for (int i = 0; i < inst->mlen; i++) {
-	    /* It looks like the MRF regs are released in the send
-	     * instruction once it's sent, not when the result comes
-	     * back.
-	     */
-	    add_dep(last_mrf_write[inst->base_mrf + i], n);
-	 }
+         for (int i = 0; i < inst->mlen; i++) {
+            /* It looks like the MRF regs are released in the send
+             * instruction once it's sent, not when the result comes
+             * back.
+             */
+            add_dep(last_mrf_write[inst->base_mrf + i], n);
+         }
       }

       if (inst->reads_flag()) {
-	 add_dep(last_conditional_mod[inst->flag_subreg], n);
+         add_dep(last_conditional_mod[inst->flag_subreg], n);
+      }
+
+      if (inst->reads_accumulator_implicitly()) {
+         add_dep(last_accumulator_write, n);
       }

       /* write-after-write deps. */
       if (inst->dst.file == GRF) {
          if (post_reg_alloc) {
-            for (int r = 0; r < inst->regs_written * reg_width; r++) {
+            for (int r = 0; r < inst->regs_written; r++) {
               add_dep(last_grf_write[inst->dst.reg + r], n);
               last_grf_write[inst->dst.reg + r] = n;
            }
         } else {
-            add_dep(last_grf_write[inst->dst.reg], n);
-            last_grf_write[inst->dst.reg] = n;
+            for (int r = 0; r < inst->regs_written; r++) {
+               add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n);
+               last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n;
+            }
         }
       } else if (inst->dst.file == MRF) {
-	 int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
-
-	 add_dep(last_mrf_write[reg], n);
-	 last_mrf_write[reg] = n;
-	 if (is_compressed(inst)) {
-	    if (inst->dst.reg & BRW_MRF_COMPR4)
-	       reg += 4;
-	    else
-	       reg++;
-	    add_dep(last_mrf_write[reg], n);
-	    last_mrf_write[reg] = n;
-	 }
+         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
+
+         add_dep(last_mrf_write[reg], n);
+         last_mrf_write[reg] = n;
+         if (is_compressed(inst)) {
+            if (inst->dst.reg & BRW_MRF_COMPR4)
+               reg += 4;
+            else
+               reg++;
+            add_dep(last_mrf_write[reg], n);
+            last_mrf_write[reg] = n;
+         }
       } else if (inst->dst.file == HW_REG &&
-		 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
+                 inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
         if (post_reg_alloc) {
            for (int r = 0; r < reg_width; r++)
               last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
-      } else if (inst->dst.file != BAD_FILE) {
-	 add_barrier_deps(n);
+      } else if (inst->dst.is_accumulator()) {
+         add_dep(last_accumulator_write, n);
+         last_accumulator_write = n;
+      } else if (inst->dst.file != BAD_FILE &&
+                 !inst->dst.is_null()) {
+         add_barrier_deps(n);
      }

       if (inst->mlen > 0 && inst->base_mrf != -1) {
-	 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
-	    add_dep(last_mrf_write[inst->base_mrf + i], n);
-	    last_mrf_write[inst->base_mrf + i] = n;
-	 }
+         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
+            add_dep(last_mrf_write[inst->base_mrf + i], n);
+            last_mrf_write[inst->base_mrf + i] = n;
+         }
       }

       if (inst->writes_flag()) {
-	 add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
-	 last_conditional_mod[inst->flag_subreg] = n;
+         add_dep(last_conditional_mod[inst->flag_subreg], n, 0);
+         last_conditional_mod[inst->flag_subreg] = n;
+      }
+
+      if (inst->writes_accumulator_implicitly(v->brw) &&
+          !inst->dst.is_accumulator()) {
+         add_dep(last_accumulator_write, n);
+         last_accumulator_write = n;
       }
    }
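The accumulator tracking added in this pass mirrors what the code already does for the flag register: reads (explicit accumulator sources, or implicit reads such as mach) take a RAW edge from the last recorded writer, and writes take a WAW edge and then become the new last writer. A condensed restatement, not literal patch code (src_is_accumulator stands in for the per-source inst->src[i].is_accumulator() checks):

    if (src_is_accumulator || inst->reads_accumulator_implicitly())
       add_dep(last_accumulator_write, n);            /* RAW */

    if (inst->dst.is_accumulator() ||
        inst->writes_accumulator_implicitly(v->brw)) {
       add_dep(last_accumulator_write, n);            /* WAW */
       last_accumulator_write = n;
    }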
@@ -875,29 +907,32 @@ fs_instruction_scheduler::calculate_deps()
    memset(last_grf_write, 0, sizeof(last_grf_write));
    memset(last_mrf_write, 0, sizeof(last_mrf_write));
    memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
+   last_accumulator_write = NULL;
    last_fixed_grf_write = NULL;

    exec_node *node;
    exec_node *prev;
    for (node = instructions.get_tail(), prev = node->prev;
-	!node->is_head_sentinel();
-	node = prev, prev = node->prev) {
+        !node->is_head_sentinel();
+        node = prev, prev = node->prev) {
       schedule_node *n = (schedule_node *)node;
       fs_inst *inst = (fs_inst *)n->inst;

       /* write-after-read deps. */
-      for (int i = 0; i < 3; i++) {
-	 if (inst->src[i].file == GRF) {
+      for (int i = 0; i < inst->sources; i++) {
+         if (inst->src[i].file == GRF) {
            if (post_reg_alloc) {
-               for (int r = 0; r < reg_width * inst->regs_read(v, i); r++)
+               for (int r = 0; r < inst->regs_read(v, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].reg + r]);
            } else {
-               add_dep(n, last_grf_write[inst->src[i].reg]);
+               for (int r = 0; r < inst->regs_read(v, i); r++) {
+                  add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]);
+               }
            }
-	 } else if (inst->src[i].file == HW_REG &&
-		    (inst->src[i].fixed_hw_reg.file ==
-		     BRW_GENERAL_REGISTER_FILE)) {
-	    if (post_reg_alloc) {
+         } else if (inst->src[i].file == HW_REG &&
+                    (inst->src[i].fixed_hw_reg.file ==
+                     BRW_GENERAL_REGISTER_FILE)) {
+            if (post_reg_alloc) {
               int size = reg_width;
               if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0)
                  size = 1;
@@ -906,26 +941,34 @@ fs_instruction_scheduler::calculate_deps()
            } else {
               add_dep(n, last_fixed_grf_write);
            }
+         } else if (inst->src[i].is_accumulator()) {
+            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file != BAD_FILE &&
-		    inst->src[i].file != IMM &&
-		    inst->src[i].file != UNIFORM) {
-	    assert(inst->src[i].file != MRF);
-	    add_barrier_deps(n);
-	 }
+                    inst->src[i].file != IMM &&
+                    inst->src[i].file != UNIFORM &&
+                    (inst->src[i].file != HW_REG ||
+                     inst->src[i].fixed_hw_reg.file != IMM)) {
+            assert(inst->src[i].file != MRF);
+            add_barrier_deps(n);
+         }
       }

       if (inst->base_mrf != -1) {
-	 for (int i = 0; i < inst->mlen; i++) {
-	    /* It looks like the MRF regs are released in the send
-	     * instruction once it's sent, not when the result comes
-	     * back.
-	     */
-	    add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
-	 }
+         for (int i = 0; i < inst->mlen; i++) {
+            /* It looks like the MRF regs are released in the send
+             * instruction once it's sent, not when the result comes
+             * back.
+ */ + add_dep(n, last_mrf_write[inst->base_mrf + i], 2); + } } if (inst->reads_flag()) { - add_dep(n, last_conditional_mod[inst->flag_subreg]); + add_dep(n, last_conditional_mod[inst->flag_subreg]); + } + + if (inst->reads_accumulator_implicitly()) { + add_dep(n, last_accumulator_write); } /* Update the things this instruction wrote, so earlier reads @@ -933,44 +976,53 @@ fs_instruction_scheduler::calculate_deps() */ if (inst->dst.file == GRF) { if (post_reg_alloc) { - for (int r = 0; r < inst->regs_written * reg_width; r++) + for (int r = 0; r < inst->regs_written; r++) last_grf_write[inst->dst.reg + r] = n; } else { - last_grf_write[inst->dst.reg] = n; + for (int r = 0; r < inst->regs_written; r++) { + last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n; + } } } else if (inst->dst.file == MRF) { - int reg = inst->dst.reg & ~BRW_MRF_COMPR4; + int reg = inst->dst.reg & ~BRW_MRF_COMPR4; - last_mrf_write[reg] = n; + last_mrf_write[reg] = n; - if (is_compressed(inst)) { - if (inst->dst.reg & BRW_MRF_COMPR4) - reg += 4; - else - reg++; + if (is_compressed(inst)) { + if (inst->dst.reg & BRW_MRF_COMPR4) + reg += 4; + else + reg++; - last_mrf_write[reg] = n; - } + last_mrf_write[reg] = n; + } } else if (inst->dst.file == HW_REG && - inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { + inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { if (post_reg_alloc) { for (int r = 0; r < reg_width; r++) last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n; } else { last_fixed_grf_write = n; } - } else if (inst->dst.file != BAD_FILE) { - add_barrier_deps(n); + } else if (inst->dst.is_accumulator()) { + last_accumulator_write = n; + } else if (inst->dst.file != BAD_FILE && + !inst->dst.is_null()) { + add_barrier_deps(n); } if (inst->mlen > 0 && inst->base_mrf != -1) { - for (int i = 0; i < v->implied_mrf_writes(inst); i++) { - last_mrf_write[inst->base_mrf + i] = n; - } + for (int i = 0; i < v->implied_mrf_writes(inst); i++) { + last_mrf_write[inst->base_mrf + i] = n; + } } if (inst->writes_flag()) { - last_conditional_mod[inst->flag_subreg] = n; + last_conditional_mod[inst->flag_subreg] = n; + } + + if (inst->writes_accumulator_implicitly(v->brw)) { + last_accumulator_write = n; } } } @@ -981,6 +1033,7 @@ vec4_instruction_scheduler::calculate_deps() schedule_node *last_grf_write[grf_count]; schedule_node *last_mrf_write[BRW_MAX_MRF]; schedule_node *last_conditional_mod = NULL; + schedule_node *last_accumulator_write = NULL; /* Fixed HW registers are assumed to be separate from the virtual * GRFs, so they can be tracked separately. We don't really write * to fixed GRFs much, so don't bother tracking them on a more @@ -1001,8 +1054,7 @@ vec4_instruction_scheduler::calculate_deps() memset(last_mrf_write, 0, sizeof(last_mrf_write)); /* top-to-bottom dependencies: RAW and WAW. 
@@ -981,6 +1033,7 @@ vec4_instruction_scheduler::calculate_deps()
    schedule_node *last_grf_write[grf_count];
    schedule_node *last_mrf_write[BRW_MAX_MRF];
    schedule_node *last_conditional_mod = NULL;
+   schedule_node *last_accumulator_write = NULL;
    /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
@@ -1001,8 +1054,7 @@ vec4_instruction_scheduler::calculate_deps()
    memset(last_mrf_write, 0, sizeof(last_mrf_write));

    /* top-to-bottom dependencies: RAW and WAW. */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list(schedule_node, n, &instructions) {
       vec4_instruction *inst = (vec4_instruction *)n->inst;

       if (inst->has_side_effects())
@@ -1016,9 +1068,14 @@ vec4_instruction_scheduler::calculate_deps()
                    (inst->src[i].fixed_hw_reg.file ==
                     BRW_GENERAL_REGISTER_FILE)) {
            add_dep(last_fixed_grf_write, n);
+         } else if (inst->src[i].is_accumulator()) {
+            assert(last_accumulator_write);
+            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
-                   inst->src[i].file != UNIFORM) {
+                   inst->src[i].file != UNIFORM &&
+                   (inst->src[i].file != HW_REG ||
+                    inst->src[i].fixed_hw_reg.file != IMM)) {
            /* No reads from MRF, and ATTR is already translated away */
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
@@ -1034,11 +1091,16 @@ vec4_instruction_scheduler::calculate_deps()
          add_dep(last_mrf_write[inst->base_mrf + i], n);
       }

-      if (inst->depends_on_flags()) {
+      if (inst->reads_flag()) {
          assert(last_conditional_mod);
          add_dep(last_conditional_mod, n);
       }

+      if (inst->reads_accumulator_implicitly()) {
+         assert(last_accumulator_write);
+         add_dep(last_accumulator_write, n);
+      }
+
       /* write-after-write deps. */
       if (inst->dst.file == GRF) {
          add_dep(last_grf_write[inst->dst.reg], n);
@@ -1049,7 +1111,11 @@ vec4_instruction_scheduler::calculate_deps()
       } else if (inst->dst.file == HW_REG &&
                  inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
          last_fixed_grf_write = n;
-      } else if (inst->dst.file != BAD_FILE) {
+      } else if (inst->dst.is_accumulator()) {
+         add_dep(last_accumulator_write, n);
+         last_accumulator_write = n;
+      } else if (inst->dst.file != BAD_FILE &&
+                 !inst->dst.is_null()) {
          add_barrier_deps(n);
       }

@@ -1060,16 +1126,23 @@ vec4_instruction_scheduler::calculate_deps()
          }
       }

-      if (inst->conditional_mod) {
+      if (inst->writes_flag()) {
          add_dep(last_conditional_mod, n, 0);
          last_conditional_mod = n;
       }
+
+      if (inst->writes_accumulator_implicitly(v->brw) &&
+          !inst->dst.is_accumulator()) {
+         add_dep(last_accumulator_write, n);
+         last_accumulator_write = n;
+      }
    }

    /* bottom-to-top dependencies: WAR */
    memset(last_grf_write, 0, sizeof(last_grf_write));
    memset(last_mrf_write, 0, sizeof(last_mrf_write));
    last_conditional_mod = NULL;
+   last_accumulator_write = NULL;
    last_fixed_grf_write = NULL;

    exec_node *node;
@@ -1088,9 +1161,13 @@ vec4_instruction_scheduler::calculate_deps()
                   (inst->src[i].fixed_hw_reg.file ==
                    BRW_GENERAL_REGISTER_FILE)) {
            add_dep(n, last_fixed_grf_write);
+         } else if (inst->src[i].is_accumulator()) {
+            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file != BAD_FILE &&
                    inst->src[i].file != IMM &&
-                   inst->src[i].file != UNIFORM) {
+                   inst->src[i].file != UNIFORM &&
+                   (inst->src[i].file != HW_REG ||
+                    inst->src[i].fixed_hw_reg.file != IMM)) {
            assert(inst->src[i].file != MRF &&
                   inst->src[i].file != ATTR);
            add_barrier_deps(n);
@@ -1105,10 +1182,14 @@ vec4_instruction_scheduler::calculate_deps()
          add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
       }

-      if (inst->depends_on_flags()) {
+      if (inst->reads_flag()) {
          add_dep(n, last_conditional_mod);
       }

+      if (inst->reads_accumulator_implicitly()) {
+         add_dep(n, last_accumulator_write);
+      }
+
       /* Update the things this instruction wrote, so earlier reads
        * can mark this as WAR dependency.
       */
       if (inst->dst.file == GRF) {
          last_grf_write[inst->dst.reg] = n;
       } else if (inst->dst.file == MRF) {
          last_mrf_write[inst->dst.reg] = n;
@@ -1119,7 +1200,10 @@ vec4_instruction_scheduler::calculate_deps()
       } else if (inst->dst.file == HW_REG &&
                  inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
          last_fixed_grf_write = n;
-      } else if (inst->dst.file != BAD_FILE) {
+      } else if (inst->dst.is_accumulator()) {
+         last_accumulator_write = n;
+      } else if (inst->dst.file != BAD_FILE &&
+                 !inst->dst.is_null()) {
          add_barrier_deps(n);
       }

@@ -1129,15 +1213,20 @@ vec4_instruction_scheduler::calculate_deps()
          }
       }

-      if (inst->conditional_mod) {
+      if (inst->writes_flag()) {
          last_conditional_mod = n;
       }
+
+      if (inst->writes_accumulator_implicitly(v->brw)) {
+         last_accumulator_write = n;
+      }
    }
 }

 schedule_node *
 fs_instruction_scheduler::choose_instruction_to_schedule()
 {
+   struct brw_context *brw = v->brw;
    schedule_node *chosen = NULL;

    if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
@@ -1146,9 +1235,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
       /* Of the instructions ready to execute or the closest to
       * being ready, choose the oldest one.
       */
-      foreach_list(node, &instructions) {
-         schedule_node *n = (schedule_node *)node;
-
+      foreach_in_list(schedule_node, n, &instructions) {
         if (!chosen || n->unblocked_time < chosen_time) {
            chosen = n;
            chosen_time = n->unblocked_time;
@@ -1161,8 +1248,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
-      foreach_list(node, &instructions) {
-         schedule_node *n = (schedule_node *)node;
+      foreach_in_list(schedule_node, n, &instructions) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (!chosen) {
@@ -1208,7 +1294,7 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
          * then the MRFs for the next SEND, then the next SEND, then the
          * MRFs, etc., without ever consuming the results of a send.
          */
-         if (v->brw->gen < 7) {
+         if (brw->gen < 7) {
            fs_inst *chosen_inst = (fs_inst *)chosen->inst;

            /* We use regs_written > 1 as our test for the kind of send
             * instruction
@@ -1216,7 +1302,8 @@ fs_instruction_scheduler::choose_instruction_to_schedule()
             * single-result send is probably actually reducing register
             * pressure.
             */
-            if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) {
+            if (inst->regs_written <= inst->dst.width / 8 &&
+                chosen_inst->regs_written > chosen_inst->dst.width / 8) {
               chosen = n;
               continue;
            } else if (inst->regs_written > chosen_inst->regs_written) {
@@ -1256,9 +1343,7 @@ vec4_instruction_scheduler::choose_instruction_to_schedule()
    /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
-   foreach_list(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
-
+   foreach_in_list(schedule_node, n, &instructions) {
      if (!chosen || n->unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->unblocked_time;
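The reworked send heuristic a few hunks up deserves a gloss: regs_written > 1 stopped being a usable test for "this is a latency-heavy send" once destinations carry their own width, because an ordinary SIMD16 ALU result already spans two registers. dst.width / 8 is the number of GRFs one logical result occupies, so comparing regs_written against it restores the original intent at any execution size. Illustrative values (mine, not measurements from the patch):

    /* SIMD8 ALU:        regs_written == 1, dst.width/8 == 1 -> not send-like
     * SIMD16 ALU:       regs_written == 2, dst.width/8 == 2 -> not send-like
     * SIMD8 RGBA texel: regs_written == 4, dst.width/8 == 1 -> send-like,
     *                   worth issuing early so its latency can be hidden. */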
@@ -1285,15 +1370,16 @@ vec4_instruction_scheduler::issue_time(backend_instruction *inst)
 }

 void
-instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
+instruction_scheduler::schedule_instructions(bblock_t *block)
 {
+   struct brw_context *brw = bv->brw;
+   backend_instruction *inst = block->end();
    time = 0;

    /* Remove non-DAG heads from the list. */
-   foreach_list_safe(node, &instructions) {
-      schedule_node *n = (schedule_node *)node;
+   foreach_in_list_safe(schedule_node, n, &instructions) {
       if (n->parent_count != 0)
-	 n->remove();
+         n->remove();
    }

    unsigned cand_generation = 1;
@@ -1303,7 +1389,7 @@
       /* Schedule this instruction. */
       assert(chosen);
       chosen->remove();
-      next_block_header->insert_before(chosen->inst);
+      inst->insert_before(block, chosen->inst);
       instructions_to_schedule--;
       update_register_pressure(chosen->inst);
@@ -1320,7 +1406,7 @@
       time = MAX2(time, chosen->unblocked_time);

       if (debug) {
-         printf("clock %4d, scheduled: ", time);
+         fprintf(stderr, "clock %4d, scheduled: ", time);
         bv->dump_instruction(chosen->inst);
       }
@@ -1330,24 +1416,24 @@
       * DAG edge as we do so.
       */
      for (int i = chosen->child_count - 1; i >= 0; i--) {
-	 schedule_node *child = chosen->children[i];
+         schedule_node *child = chosen->children[i];

-	 child->unblocked_time = MAX2(child->unblocked_time,
-				      time + chosen->child_latency[i]);
+         child->unblocked_time = MAX2(child->unblocked_time,
+                                      time + chosen->child_latency[i]);

         if (debug) {
-            printf("\tchild %d, %d parents: ", i, child->parent_count);
+            fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
            bv->dump_instruction(child->inst);
         }

         child->cand_generation = cand_generation;
-	 child->parent_count--;
-	 if (child->parent_count == 0) {
+         child->parent_count--;
+         if (child->parent_count == 0) {
            if (debug) {
-               printf("\t\tnow available\n");
+               fprintf(stderr, "\t\tnow available\n");
            }
-	    instructions.push_head(child);
-	 }
+            instructions.push_head(child);
+         }
      }

      cand_generation++;
@@ -1356,28 +1442,26 @@
       * the next math instruction isn't going to make progress until the first
       * is done.
       */
-      if (chosen->inst->is_math()) {
-	 foreach_list(node, &instructions) {
-	    schedule_node *n = (schedule_node *)node;
-
-	    if (n->inst->is_math())
-	       n->unblocked_time = MAX2(n->unblocked_time,
-					time + chosen->latency);
-	 }
+      if (brw->gen < 6 && chosen->inst->is_math()) {
+         foreach_in_list(schedule_node, n, &instructions) {
+            if (n->inst->is_math())
+               n->unblocked_time = MAX2(n->unblocked_time,
+                                        time + chosen->latency);
+         }
       }
    }

+   if (block->end()->opcode == BRW_OPCODE_NOP)
+      block->end()->remove(block);
    assert(instructions_to_schedule == 0);
 }

 void
-instruction_scheduler::run(exec_list *all_instructions)
+instruction_scheduler::run(cfg_t *cfg)
 {
-   backend_instruction *next_block_header =
-      (backend_instruction *)all_instructions->head;
-
    if (debug) {
-      printf("\nInstructions before scheduling (reg_alloc %d)\n", post_reg_alloc);
+      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
+              post_reg_alloc);
       bv->dump_instructions();
    }
@@ -1385,33 +1469,29 @@
    * scheduling.
    */
   if (remaining_grf_uses) {
-      foreach_list(node, all_instructions) {
-         count_remaining_grf_uses((backend_instruction *)node);
+      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
+         count_remaining_grf_uses(inst);
      }
   }

-   while (!next_block_header->is_tail_sentinel()) {
-      /* Add things to be scheduled until we get to a new BB. */
-      while (!next_block_header->is_tail_sentinel()) {
-         backend_instruction *inst = next_block_header;
-         next_block_header = (backend_instruction *)next_block_header->next;
+   foreach_block(block, cfg) {
+      if (block->end_ip - block->start_ip <= 1)
+         continue;
+
+      add_insts_from_block(block);

-         add_inst(inst);
-         if (inst->is_control_flow())
-            break;
-      }
       calculate_deps();

-      foreach_list(node, &instructions) {
-         schedule_node *n = (schedule_node *)node;
+      foreach_in_list(schedule_node, n, &instructions) {
         compute_delay(n);
      }

-      schedule_instructions(next_block_header);
+      schedule_instructions(block);
   }

   if (debug) {
-      printf("\nInstructions after scheduling (reg_alloc %d)\n", post_reg_alloc);
+      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
+              post_reg_alloc);
      bv->dump_instructions();
   }
 }
@@ -1426,10 +1506,10 @@ fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
    grf_count = virtual_grf_count;

    fs_instruction_scheduler sched(this, grf_count, mode);
-   sched.run(&instructions);
+   sched.run(cfg);

    if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
-      printf("fs%d estimated execution time: %d cycles\n",
+      fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
             dispatch_width, sched.time);
    }

@@ -1440,10 +1520,10 @@ void
 vec4_visitor::opt_schedule_instructions()
 {
    vec4_instruction_scheduler sched(this, prog_data->total_grf);
-   sched.run(&instructions);
+   sched.run(cfg);

    if (unlikely(debug_flag)) {
-      printf("vec4 estimated execution time: %d cycles\n", sched.time);
+      fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
    }

    invalidate_live_intervals();
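Taken together, the driver loop now schedules strictly within basic blocks instead of slicing one flat instruction list at control-flow instructions. A condensed restatement of the new run() for reference (same calls as the hunks above, with the debug output and register-pressure bookkeeping elided):

    void
    instruction_scheduler::run(cfg_t *cfg)
    {
       foreach_block(block, cfg) {
          if (block->end_ip - block->start_ip <= 1)
             continue;                    /* one instruction: nothing to reorder */

          add_insts_from_block(block);    /* may append a guard NOP (see above) */
          calculate_deps();
          foreach_in_list(schedule_node, n, &instructions) {
             compute_delay(n);
          }
          schedule_instructions(block);   /* re-emits nodes; strips the guard NOP */
       }
    }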