X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_schedule_instructions.cpp;h=40b5715cccde49caf27528331240be12026d5c60;hb=8776b1b14b229d110f283f5da8c3c36261068ede;hp=5710380f12eaff843a903801d0ae12588f7cc03e;hpb=f72a0d99fed5d6205431a59775484cde3442cceb;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp index 5710380f12e..40b5715cccd 100644 --- a/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp +++ b/src/mesa/drivers/dri/i965/brw_schedule_instructions.cpp @@ -27,6 +27,8 @@ #include "brw_fs.h" #include "brw_vec4.h" +#include "brw_cfg.h" +#include "brw_shader.h" #include "glsl/glsl_types.h" #include "glsl/ir_optimization.h" @@ -132,20 +134,20 @@ schedule_node::set_latency_gen7(bool is_haswell) case BRW_OPCODE_MAD: /* 2 cycles * (since the last two src operands are in different register banks): - * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q }; + * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q }; * * 3 cycles on IVB, 4 on HSW * (since the last two src operands are in the same register bank): - * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q }; + * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q }; * * 18 cycles on IVB, 16 on HSW * (since the last two src operands are in different register banks): - * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q }; + * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q }; * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q }; * * 20 cycles on IVB, 18 on HSW * (since the last two src operands are in the same register bank): - * mad(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q }; + * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q }; * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q }; */ @@ -158,20 +160,20 @@ schedule_node::set_latency_gen7(bool is_haswell) case BRW_OPCODE_LRP: /* 2 cycles * (since the last two src operands are in different register banks): - * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q }; + * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q }; * * 3 cycles on IVB, 4 on HSW * (since the last two src operands are in the same register bank): - * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q }; + * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q }; * * 16 cycles on IVB, 14 on HSW * (since the last two src operands are in different register banks): - * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g3.1<4,1,1>F.x { align16 WE_normal 1Q }; + * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q }; * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q }; * * 16 cycles * (since the last two src operands are in the same register bank): - * lrp(8) g4<1>F g2.2<4,1,1>F.x g2<4,1,1>F.x g2.1<4,1,1>F.x { align16 WE_normal 1Q }; + * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q }; * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q }; */ @@ -335,6 +337,8 @@ schedule_node::set_latency_gen7(bool is_haswell) * then around 140. Presumably this is cache hit vs miss. 
*/ latency = 50; + break; + case SHADER_OPCODE_UNTYPED_ATOMIC: /* Test code: * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q }; @@ -391,14 +395,16 @@ schedule_node::set_latency_gen7(bool is_haswell) class instruction_scheduler { public: - instruction_scheduler(backend_visitor *v, int grf_count, bool post_reg_alloc) + instruction_scheduler(backend_visitor *v, int grf_count, + instruction_scheduler_mode mode) { this->bv = v; this->mem_ctx = ralloc_context(NULL); this->grf_count = grf_count; this->instructions.make_empty(); this->instructions_to_schedule = 0; - this->post_reg_alloc = post_reg_alloc; + this->post_reg_alloc = (mode == SCHEDULE_POST); + this->mode = mode; this->time = 0; if (!post_reg_alloc) { this->remaining_grf_uses = rzalloc_array(mem_ctx, int, grf_count); @@ -417,8 +423,8 @@ public: void add_dep(schedule_node *before, schedule_node *after, int latency); void add_dep(schedule_node *before, schedule_node *after); - void run(exec_list *instructions); - void add_inst(backend_instruction *inst); + void run(cfg_t *cfg); + void add_insts_from_block(bblock_t *block); void compute_delay(schedule_node *node); virtual void calculate_deps() = 0; virtual schedule_node *choose_instruction_to_schedule() = 0; @@ -427,8 +433,8 @@ public: * Returns how many cycles it takes the instruction to issue. * * Instructions in gen hardware are handled one simd4 vector at a time, - * with 1 cycle per vector dispatched. Thus 8-wide pixel shaders take 2 - * cycles to dispatch and 16-wide (compressed) instructions take 4. + * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2 + * cycles to dispatch and SIMD16 (compressed) instructions take 4. */ virtual int issue_time(backend_instruction *inst) = 0; @@ -436,7 +442,7 @@ public: virtual void update_register_pressure(backend_instruction *inst) = 0; virtual int get_register_pressure_benefit(backend_instruction *inst) = 0; - void schedule_instructions(backend_instruction *next_block_header); + void schedule_instructions(bblock_t *block); void *mem_ctx; @@ -447,6 +453,8 @@ public: exec_list instructions; backend_visitor *bv; + instruction_scheduler_mode mode; + /** * Number of instructions left to schedule that reference each vgrf. 
* @@ -467,7 +475,8 @@ public: class fs_instruction_scheduler : public instruction_scheduler { public: - fs_instruction_scheduler(fs_visitor *v, int grf_count, bool post_reg_alloc); + fs_instruction_scheduler(fs_visitor *v, int grf_count, + instruction_scheduler_mode mode); void calculate_deps(); bool is_compressed(fs_inst *inst); schedule_node *choose_instruction_to_schedule(); @@ -481,8 +490,8 @@ public: fs_instruction_scheduler::fs_instruction_scheduler(fs_visitor *v, int grf_count, - bool post_reg_alloc) - : instruction_scheduler(v, grf_count, post_reg_alloc), + instruction_scheduler_mode mode) + : instruction_scheduler(v, grf_count, mode), v(v) { } @@ -498,7 +507,7 @@ fs_instruction_scheduler::count_remaining_grf_uses(backend_instruction *be) if (inst->dst.file == GRF) remaining_grf_uses[inst->dst.reg]++; - for (int i = 0; i < 3; i++) { + for (int i = 0; i < inst->sources; i++) { if (inst->src[i].file != GRF) continue; @@ -519,7 +528,7 @@ fs_instruction_scheduler::update_register_pressure(backend_instruction *be) grf_active[inst->dst.reg] = true; } - for (int i = 0; i < 3; i++) { + for (int i = 0; i < inst->sources; i++) { if (inst->src[i].file == GRF) { remaining_grf_uses[inst->src[i].reg]--; grf_active[inst->src[i].reg] = true; @@ -540,7 +549,7 @@ fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be) benefit -= v->virtual_grf_sizes[inst->dst.reg]; } - for (int i = 0; i < 3; i++) { + for (int i = 0; i < inst->sources; i++) { if (inst->src[i].file != GRF) continue; @@ -569,7 +578,7 @@ public: vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v, int grf_count) - : instruction_scheduler(v, grf_count, true), + : instruction_scheduler(v, grf_count, SCHEDULE_POST), v(v) { } @@ -603,6 +612,7 @@ schedule_node::schedule_node(backend_instruction *inst, this->parent_count = 0; this->unblocked_time = 0; this->cand_generation = 0; + this->delay = 0; /* We can't measure Gen6 timings directly but expect them to be much * closer to Gen7 than Gen4. @@ -616,17 +626,28 @@ schedule_node::schedule_node(backend_instruction *inst, } void -instruction_scheduler::add_inst(backend_instruction *inst) +instruction_scheduler::add_insts_from_block(bblock_t *block) { - schedule_node *n = new(mem_ctx) schedule_node(inst, this); + /* Removing the last instruction from a basic block removes the block as + * well, so put a NOP at the end to keep it alive. + */ + if (!block->end()->is_control_flow()) { + backend_instruction *nop = new(mem_ctx) backend_instruction(); + nop->opcode = BRW_OPCODE_NOP; + block->end()->insert_after(block, nop); + } + + foreach_inst_in_block_safe(backend_instruction, inst, block) { + if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow()) + continue; - assert(!inst->is_head_sentinel()); - assert(!inst->is_tail_sentinel()); + schedule_node *n = new(mem_ctx) schedule_node(inst, this); - this->instructions_to_schedule++; + this->instructions_to_schedule++; - inst->remove(); - instructions.push_tail(n); + inst->remove(block); + instructions.push_tail(n); + } } /** Recursive computation of the delay member of a node. 
*/ @@ -652,7 +673,7 @@ instruction_scheduler::compute_delay(schedule_node *n) */ void instruction_scheduler::add_dep(schedule_node *before, schedule_node *after, - int latency) + int latency) { if (!before || !after) return; @@ -661,22 +682,22 @@ instruction_scheduler::add_dep(schedule_node *before, schedule_node *after, for (int i = 0; i < before->child_count; i++) { if (before->children[i] == after) { - before->child_latency[i] = MAX2(before->child_latency[i], latency); - return; + before->child_latency[i] = MAX2(before->child_latency[i], latency); + return; } } if (before->child_array_size <= before->child_count) { if (before->child_array_size < 16) - before->child_array_size = 16; + before->child_array_size = 16; else - before->child_array_size *= 2; + before->child_array_size *= 2; before->children = reralloc(mem_ctx, before->children, - schedule_node *, - before->child_array_size); + schedule_node *, + before->child_array_size); before->child_latency = reralloc(mem_ctx, before->child_latency, - int, before->child_array_size); + int, before->child_array_size); } before->children[before->child_count] = after; @@ -707,15 +728,15 @@ instruction_scheduler::add_barrier_deps(schedule_node *n) if (prev) { while (!prev->is_head_sentinel()) { - add_dep(prev, n, 0); - prev = (schedule_node *)prev->prev; + add_dep(prev, n, 0); + prev = (schedule_node *)prev->prev; } } if (next) { while (!next->is_tail_sentinel()) { - add_dep(n, next, 0); - next = (schedule_node *)next->next; + add_dep(n, next, 0); + next = (schedule_node *)next->next; } } } @@ -726,22 +747,20 @@ instruction_scheduler::add_barrier_deps(schedule_node *n) bool fs_instruction_scheduler::is_compressed(fs_inst *inst) { - return (v->dispatch_width == 16 && - !inst->force_uncompressed && - !inst->force_sechalf); + return inst->exec_size == 16; } void fs_instruction_scheduler::calculate_deps() { - /* Pre-register-allocation, this tracks the last write per VGRF (so - * different reg_offsets within it can interfere when they shouldn't). + /* Pre-register-allocation, this tracks the last write per VGRF offset. * After register allocation, reg_offsets are gone and we track individual * GRF registers. */ - schedule_node *last_grf_write[grf_count]; + schedule_node *last_grf_write[grf_count * 16]; schedule_node *last_mrf_write[BRW_MAX_MRF]; schedule_node *last_conditional_mod[2] = { NULL, NULL }; + schedule_node *last_accumulator_write = NULL; /* Fixed HW registers are assumed to be separate from the virtual * GRFs, so they can be tracked separately. We don't really write * to fixed GRFs much, so don't bother tracking them on a more @@ -763,8 +782,7 @@ fs_instruction_scheduler::calculate_deps() memset(last_mrf_write, 0, sizeof(last_mrf_write)); /* top-to-bottom dependencies: RAW and WAW. */ - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; + foreach_in_list(schedule_node, n, &instructions) { fs_inst *inst = (fs_inst *)n->inst; if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT || @@ -772,18 +790,20 @@ fs_instruction_scheduler::calculate_deps() add_barrier_deps(n); /* read-after-write deps. 
*/ - for (int i = 0; i < 3; i++) { - if (inst->src[i].file == GRF) { + for (int i = 0; i < inst->sources; i++) { + if (inst->src[i].file == GRF) { if (post_reg_alloc) { - for (int r = 0; r < reg_width * inst->regs_read(v, i); r++) + for (int r = 0; r < inst->regs_read(v, i); r++) add_dep(last_grf_write[inst->src[i].reg + r], n); } else { - add_dep(last_grf_write[inst->src[i].reg], n); + for (int r = 0; r < inst->regs_read(v, i); r++) { + add_dep(last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r], n); + } } - } else if (inst->src[i].file == HW_REG && - (inst->src[i].fixed_hw_reg.file == - BRW_GENERAL_REGISTER_FILE)) { - if (post_reg_alloc) { + } else if (inst->src[i].file == HW_REG && + (inst->src[i].fixed_hw_reg.file == + BRW_GENERAL_REGISTER_FILE)) { + if (post_reg_alloc) { int size = reg_width; if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0) size = 1; @@ -792,74 +812,94 @@ fs_instruction_scheduler::calculate_deps() } else { add_dep(last_fixed_grf_write, n); } - } else if (inst->src[i].file != BAD_FILE && - inst->src[i].file != IMM && - inst->src[i].file != UNIFORM) { - assert(inst->src[i].file != MRF); - add_barrier_deps(n); - } + } else if (inst->src[i].is_accumulator()) { + add_dep(last_accumulator_write, n); + } else if (inst->src[i].file != BAD_FILE && + inst->src[i].file != IMM && + inst->src[i].file != UNIFORM && + (inst->src[i].file != HW_REG || + inst->src[i].fixed_hw_reg.file != IMM)) { + assert(inst->src[i].file != MRF); + add_barrier_deps(n); + } } if (inst->base_mrf != -1) { - for (int i = 0; i < inst->mlen; i++) { - /* It looks like the MRF regs are released in the send - * instruction once it's sent, not when the result comes - * back. - */ - add_dep(last_mrf_write[inst->base_mrf + i], n); - } + for (int i = 0; i < inst->mlen; i++) { + /* It looks like the MRF regs are released in the send + * instruction once it's sent, not when the result comes + * back. + */ + add_dep(last_mrf_write[inst->base_mrf + i], n); + } } if (inst->reads_flag()) { - add_dep(last_conditional_mod[inst->flag_subreg], n); + add_dep(last_conditional_mod[inst->flag_subreg], n); + } + + if (inst->reads_accumulator_implicitly()) { + add_dep(last_accumulator_write, n); } /* write-after-write deps. 
*/ if (inst->dst.file == GRF) { if (post_reg_alloc) { - for (int r = 0; r < inst->regs_written * reg_width; r++) { + for (int r = 0; r < inst->regs_written; r++) { add_dep(last_grf_write[inst->dst.reg + r], n); last_grf_write[inst->dst.reg + r] = n; } } else { - add_dep(last_grf_write[inst->dst.reg], n); - last_grf_write[inst->dst.reg] = n; + for (int r = 0; r < inst->regs_written; r++) { + add_dep(last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r], n); + last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n; + } } } else if (inst->dst.file == MRF) { - int reg = inst->dst.reg & ~BRW_MRF_COMPR4; - - add_dep(last_mrf_write[reg], n); - last_mrf_write[reg] = n; - if (is_compressed(inst)) { - if (inst->dst.reg & BRW_MRF_COMPR4) - reg += 4; - else - reg++; - add_dep(last_mrf_write[reg], n); - last_mrf_write[reg] = n; - } + int reg = inst->dst.reg & ~BRW_MRF_COMPR4; + + add_dep(last_mrf_write[reg], n); + last_mrf_write[reg] = n; + if (is_compressed(inst)) { + if (inst->dst.reg & BRW_MRF_COMPR4) + reg += 4; + else + reg++; + add_dep(last_mrf_write[reg], n); + last_mrf_write[reg] = n; + } } else if (inst->dst.file == HW_REG && - inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { + inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { if (post_reg_alloc) { for (int r = 0; r < reg_width; r++) last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n; } else { last_fixed_grf_write = n; } - } else if (inst->dst.file != BAD_FILE) { - add_barrier_deps(n); + } else if (inst->dst.is_accumulator()) { + add_dep(last_accumulator_write, n); + last_accumulator_write = n; + } else if (inst->dst.file != BAD_FILE && + !inst->dst.is_null()) { + add_barrier_deps(n); } if (inst->mlen > 0 && inst->base_mrf != -1) { - for (int i = 0; i < v->implied_mrf_writes(inst); i++) { - add_dep(last_mrf_write[inst->base_mrf + i], n); - last_mrf_write[inst->base_mrf + i] = n; - } + for (int i = 0; i < v->implied_mrf_writes(inst); i++) { + add_dep(last_mrf_write[inst->base_mrf + i], n); + last_mrf_write[inst->base_mrf + i] = n; + } } if (inst->writes_flag()) { - add_dep(last_conditional_mod[inst->flag_subreg], n, 0); - last_conditional_mod[inst->flag_subreg] = n; + add_dep(last_conditional_mod[inst->flag_subreg], n, 0); + last_conditional_mod[inst->flag_subreg] = n; + } + + if (inst->writes_accumulator_implicitly(v->brw) && + !inst->dst.is_accumulator()) { + add_dep(last_accumulator_write, n); + last_accumulator_write = n; } } @@ -867,29 +907,32 @@ fs_instruction_scheduler::calculate_deps() memset(last_grf_write, 0, sizeof(last_grf_write)); memset(last_mrf_write, 0, sizeof(last_mrf_write)); memset(last_conditional_mod, 0, sizeof(last_conditional_mod)); + last_accumulator_write = NULL; last_fixed_grf_write = NULL; exec_node *node; exec_node *prev; for (node = instructions.get_tail(), prev = node->prev; - !node->is_head_sentinel(); - node = prev, prev = node->prev) { + !node->is_head_sentinel(); + node = prev, prev = node->prev) { schedule_node *n = (schedule_node *)node; fs_inst *inst = (fs_inst *)n->inst; /* write-after-read deps. 
*/ - for (int i = 0; i < 3; i++) { - if (inst->src[i].file == GRF) { + for (int i = 0; i < inst->sources; i++) { + if (inst->src[i].file == GRF) { if (post_reg_alloc) { - for (int r = 0; r < reg_width * inst->regs_read(v, i); r++) + for (int r = 0; r < inst->regs_read(v, i); r++) add_dep(n, last_grf_write[inst->src[i].reg + r]); } else { - add_dep(n, last_grf_write[inst->src[i].reg]); + for (int r = 0; r < inst->regs_read(v, i); r++) { + add_dep(n, last_grf_write[inst->src[i].reg * 16 + inst->src[i].reg_offset + r]); + } } - } else if (inst->src[i].file == HW_REG && - (inst->src[i].fixed_hw_reg.file == - BRW_GENERAL_REGISTER_FILE)) { - if (post_reg_alloc) { + } else if (inst->src[i].file == HW_REG && + (inst->src[i].fixed_hw_reg.file == + BRW_GENERAL_REGISTER_FILE)) { + if (post_reg_alloc) { int size = reg_width; if (inst->src[i].fixed_hw_reg.vstride == BRW_VERTICAL_STRIDE_0) size = 1; @@ -898,26 +941,34 @@ fs_instruction_scheduler::calculate_deps() } else { add_dep(n, last_fixed_grf_write); } + } else if (inst->src[i].is_accumulator()) { + add_dep(n, last_accumulator_write); } else if (inst->src[i].file != BAD_FILE && - inst->src[i].file != IMM && - inst->src[i].file != UNIFORM) { - assert(inst->src[i].file != MRF); - add_barrier_deps(n); - } + inst->src[i].file != IMM && + inst->src[i].file != UNIFORM && + (inst->src[i].file != HW_REG || + inst->src[i].fixed_hw_reg.file != IMM)) { + assert(inst->src[i].file != MRF); + add_barrier_deps(n); + } } if (inst->base_mrf != -1) { - for (int i = 0; i < inst->mlen; i++) { - /* It looks like the MRF regs are released in the send - * instruction once it's sent, not when the result comes - * back. - */ - add_dep(n, last_mrf_write[inst->base_mrf + i], 2); - } + for (int i = 0; i < inst->mlen; i++) { + /* It looks like the MRF regs are released in the send + * instruction once it's sent, not when the result comes + * back. 
+ */ + add_dep(n, last_mrf_write[inst->base_mrf + i], 2); + } } if (inst->reads_flag()) { - add_dep(n, last_conditional_mod[inst->flag_subreg]); + add_dep(n, last_conditional_mod[inst->flag_subreg]); + } + + if (inst->reads_accumulator_implicitly()) { + add_dep(n, last_accumulator_write); } /* Update the things this instruction wrote, so earlier reads @@ -925,44 +976,53 @@ fs_instruction_scheduler::calculate_deps() */ if (inst->dst.file == GRF) { if (post_reg_alloc) { - for (int r = 0; r < inst->regs_written * reg_width; r++) + for (int r = 0; r < inst->regs_written; r++) last_grf_write[inst->dst.reg + r] = n; } else { - last_grf_write[inst->dst.reg] = n; + for (int r = 0; r < inst->regs_written; r++) { + last_grf_write[inst->dst.reg * 16 + inst->dst.reg_offset + r] = n; + } } } else if (inst->dst.file == MRF) { - int reg = inst->dst.reg & ~BRW_MRF_COMPR4; + int reg = inst->dst.reg & ~BRW_MRF_COMPR4; - last_mrf_write[reg] = n; + last_mrf_write[reg] = n; - if (is_compressed(inst)) { - if (inst->dst.reg & BRW_MRF_COMPR4) - reg += 4; - else - reg++; + if (is_compressed(inst)) { + if (inst->dst.reg & BRW_MRF_COMPR4) + reg += 4; + else + reg++; - last_mrf_write[reg] = n; - } + last_mrf_write[reg] = n; + } } else if (inst->dst.file == HW_REG && - inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { + inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { if (post_reg_alloc) { for (int r = 0; r < reg_width; r++) last_grf_write[inst->dst.fixed_hw_reg.nr + r] = n; } else { last_fixed_grf_write = n; } - } else if (inst->dst.file != BAD_FILE) { - add_barrier_deps(n); + } else if (inst->dst.is_accumulator()) { + last_accumulator_write = n; + } else if (inst->dst.file != BAD_FILE && + !inst->dst.is_null()) { + add_barrier_deps(n); } if (inst->mlen > 0 && inst->base_mrf != -1) { - for (int i = 0; i < v->implied_mrf_writes(inst); i++) { - last_mrf_write[inst->base_mrf + i] = n; - } + for (int i = 0; i < v->implied_mrf_writes(inst); i++) { + last_mrf_write[inst->base_mrf + i] = n; + } } if (inst->writes_flag()) { - last_conditional_mod[inst->flag_subreg] = n; + last_conditional_mod[inst->flag_subreg] = n; + } + + if (inst->writes_accumulator_implicitly(v->brw)) { + last_accumulator_write = n; } } } @@ -973,6 +1033,7 @@ vec4_instruction_scheduler::calculate_deps() schedule_node *last_grf_write[grf_count]; schedule_node *last_mrf_write[BRW_MAX_MRF]; schedule_node *last_conditional_mod = NULL; + schedule_node *last_accumulator_write = NULL; /* Fixed HW registers are assumed to be separate from the virtual * GRFs, so they can be tracked separately. We don't really write * to fixed GRFs much, so don't bother tracking them on a more @@ -993,8 +1054,7 @@ vec4_instruction_scheduler::calculate_deps() memset(last_mrf_write, 0, sizeof(last_mrf_write)); /* top-to-bottom dependencies: RAW and WAW. 
*/ - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; + foreach_in_list(schedule_node, n, &instructions) { vec4_instruction *inst = (vec4_instruction *)n->inst; if (inst->has_side_effects()) @@ -1008,9 +1068,14 @@ vec4_instruction_scheduler::calculate_deps() (inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)) { add_dep(last_fixed_grf_write, n); + } else if (inst->src[i].is_accumulator()) { + assert(last_accumulator_write); + add_dep(last_accumulator_write, n); } else if (inst->src[i].file != BAD_FILE && inst->src[i].file != IMM && - inst->src[i].file != UNIFORM) { + inst->src[i].file != UNIFORM && + (inst->src[i].file != HW_REG || + inst->src[i].fixed_hw_reg.file != IMM)) { /* No reads from MRF, and ATTR is already translated away */ assert(inst->src[i].file != MRF && inst->src[i].file != ATTR); @@ -1026,11 +1091,16 @@ vec4_instruction_scheduler::calculate_deps() add_dep(last_mrf_write[inst->base_mrf + i], n); } - if (inst->depends_on_flags()) { + if (inst->reads_flag()) { assert(last_conditional_mod); add_dep(last_conditional_mod, n); } + if (inst->reads_accumulator_implicitly()) { + assert(last_accumulator_write); + add_dep(last_accumulator_write, n); + } + /* write-after-write deps. */ if (inst->dst.file == GRF) { add_dep(last_grf_write[inst->dst.reg], n); @@ -1041,7 +1111,11 @@ vec4_instruction_scheduler::calculate_deps() } else if (inst->dst.file == HW_REG && inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { last_fixed_grf_write = n; - } else if (inst->dst.file != BAD_FILE) { + } else if (inst->dst.is_accumulator()) { + add_dep(last_accumulator_write, n); + last_accumulator_write = n; + } else if (inst->dst.file != BAD_FILE && + !inst->dst.is_null()) { add_barrier_deps(n); } @@ -1052,16 +1126,23 @@ vec4_instruction_scheduler::calculate_deps() } } - if (inst->conditional_mod) { + if (inst->writes_flag()) { add_dep(last_conditional_mod, n, 0); last_conditional_mod = n; } + + if (inst->writes_accumulator_implicitly(v->brw) && + !inst->dst.is_accumulator()) { + add_dep(last_accumulator_write, n); + last_accumulator_write = n; + } } /* bottom-to-top dependencies: WAR */ memset(last_grf_write, 0, sizeof(last_grf_write)); memset(last_mrf_write, 0, sizeof(last_mrf_write)); last_conditional_mod = NULL; + last_accumulator_write = NULL; last_fixed_grf_write = NULL; exec_node *node; @@ -1080,9 +1161,13 @@ vec4_instruction_scheduler::calculate_deps() (inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)) { add_dep(n, last_fixed_grf_write); + } else if (inst->src[i].is_accumulator()) { + add_dep(n, last_accumulator_write); } else if (inst->src[i].file != BAD_FILE && inst->src[i].file != IMM && - inst->src[i].file != UNIFORM) { + inst->src[i].file != UNIFORM && + (inst->src[i].file != HW_REG || + inst->src[i].fixed_hw_reg.file != IMM)) { assert(inst->src[i].file != MRF && inst->src[i].file != ATTR); add_barrier_deps(n); @@ -1097,10 +1182,14 @@ vec4_instruction_scheduler::calculate_deps() add_dep(n, last_mrf_write[inst->base_mrf + i], 2); } - if (inst->depends_on_flags()) { + if (inst->reads_flag()) { add_dep(n, last_conditional_mod); } + if (inst->reads_accumulator_implicitly()) { + add_dep(n, last_accumulator_write); + } + /* Update the things this instruction wrote, so earlier reads * can mark this as WAR dependency. 
*/ @@ -1111,7 +1200,10 @@ vec4_instruction_scheduler::calculate_deps() } else if (inst->dst.file == HW_REG && inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) { last_fixed_grf_write = n; - } else if (inst->dst.file != BAD_FILE) { + } else if (inst->dst.is_accumulator()) { + last_accumulator_write = n; + } else if (inst->dst.file != BAD_FILE && + !inst->dst.is_null()) { add_barrier_deps(n); } @@ -1121,26 +1213,29 @@ vec4_instruction_scheduler::calculate_deps() } } - if (inst->conditional_mod) { + if (inst->writes_flag()) { last_conditional_mod = n; } + + if (inst->writes_accumulator_implicitly(v->brw)) { + last_accumulator_write = n; + } } } schedule_node * fs_instruction_scheduler::choose_instruction_to_schedule() { + struct brw_context *brw = v->brw; schedule_node *chosen = NULL; - if (post_reg_alloc) { + if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) { int chosen_time = 0; - /* Of the instructions closest ready to execute or the closest to + /* Of the instructions ready to execute or the closest to * being ready, choose the oldest one. */ - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; - + foreach_in_list(schedule_node, n, &instructions) { if (!chosen || n->unblocked_time < chosen_time) { chosen = n; chosen_time = n->unblocked_time; @@ -1149,12 +1244,11 @@ fs_instruction_scheduler::choose_instruction_to_schedule() } else { /* Before register allocation, we don't care about the latencies of * instructions. All we care about is reducing live intervals of - * variables so that we can avoid register spilling, or get 16-wide + * variables so that we can avoid register spilling, or get SIMD16 * shaders which naturally do a better job of hiding instruction * latency. */ - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; + foreach_in_list(schedule_node, n, &instructions) { fs_inst *inst = (fs_inst *)n->inst; if (!chosen) { @@ -1179,40 +1273,43 @@ fs_instruction_scheduler::choose_instruction_to_schedule() continue; } - /* Prefer instructions that recently became available for scheduling. - * These are the things that are most likely to (eventually) make a - * variable dead and reduce register pressure. Typical register - * pressure estimates don't work for us because most of our pressure - * comes from texturing, where no single instruction to schedule will - * make a vec4 value dead. - */ - if (n->cand_generation > chosen->cand_generation) { - chosen = n; - continue; - } else if (n->cand_generation < chosen->cand_generation) { - continue; - } - - /* On MRF-using chips, prefer non-SEND instructions. If we don't do - * this, then because we prefer instructions that just became - * candidates, we'll end up in a pattern of scheduling a SEND, then - * the MRFs for the next SEND, then the next SEND, then the MRFs, - * etc., without ever consuming the results of a send. - */ - if (v->brw->gen < 7) { - fs_inst *chosen_inst = (fs_inst *)chosen->inst; - - /* We use regs_written > 1 as our test for the kind of send - * instruction to avoid -- only sends generate many regs, and a - * single-result send is probably actually reducing register - * pressure. + if (mode == SCHEDULE_PRE_LIFO) { + /* Prefer instructions that recently became available for + * scheduling. These are the things that are most likely to + * (eventually) make a variable dead and reduce register pressure. 
+ * Typical register pressure estimates don't work for us because + * most of our pressure comes from texturing, where no single + * instruction to schedule will make a vec4 value dead. */ - if (inst->regs_written <= 1 && chosen_inst->regs_written > 1) { + if (n->cand_generation > chosen->cand_generation) { chosen = n; continue; - } else if (inst->regs_written > chosen_inst->regs_written) { + } else if (n->cand_generation < chosen->cand_generation) { continue; } + + /* On MRF-using chips, prefer non-SEND instructions. If we don't + * do this, then because we prefer instructions that just became + * candidates, we'll end up in a pattern of scheduling a SEND, + * then the MRFs for the next SEND, then the next SEND, then the + * MRFs, etc., without ever consuming the results of a send. + */ + if (brw->gen < 7) { + fs_inst *chosen_inst = (fs_inst *)chosen->inst; + + /* We use regs_written > 1 as our test for the kind of send + * instruction to avoid -- only sends generate many regs, and a + * single-result send is probably actually reducing register + * pressure. + */ + if (inst->regs_written <= inst->dst.width / 8 && + chosen_inst->regs_written > chosen_inst->dst.width / 8) { + chosen = n; + continue; + } else if (inst->regs_written > chosen_inst->regs_written) { + continue; + } + } } /* For instructions pushed on the cands list at the same time, prefer @@ -1246,9 +1343,7 @@ vec4_instruction_scheduler::choose_instruction_to_schedule() /* Of the instructions ready to execute or the closest to being ready, * choose the oldest one. */ - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; - + foreach_in_list(schedule_node, n, &instructions) { if (!chosen || n->unblocked_time < chosen_time) { chosen = n; chosen_time = n->unblocked_time; @@ -1275,15 +1370,16 @@ vec4_instruction_scheduler::issue_time(backend_instruction *inst) } void -instruction_scheduler::schedule_instructions(backend_instruction *next_block_header) +instruction_scheduler::schedule_instructions(bblock_t *block) { + struct brw_context *brw = bv->brw; + backend_instruction *inst = block->end(); time = 0; /* Remove non-DAG heads from the list. */ - foreach_list_safe(node, &instructions) { - schedule_node *n = (schedule_node *)node; + foreach_in_list_safe(schedule_node, n, &instructions) { if (n->parent_count != 0) - n->remove(); + n->remove(); } unsigned cand_generation = 1; @@ -1293,7 +1389,7 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea /* Schedule this instruction. */ assert(chosen); chosen->remove(); - next_block_header->insert_before(chosen->inst); + inst->insert_before(block, chosen->inst); instructions_to_schedule--; update_register_pressure(chosen->inst); @@ -1310,7 +1406,7 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea time = MAX2(time, chosen->unblocked_time); if (debug) { - printf("clock %4d, scheduled: ", time); + fprintf(stderr, "clock %4d, scheduled: ", time); bv->dump_instruction(chosen->inst); } @@ -1320,24 +1416,24 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea * DAG edge as we do so. 
*/ for (int i = chosen->child_count - 1; i >= 0; i--) { - schedule_node *child = chosen->children[i]; + schedule_node *child = chosen->children[i]; - child->unblocked_time = MAX2(child->unblocked_time, - time + chosen->child_latency[i]); + child->unblocked_time = MAX2(child->unblocked_time, + time + chosen->child_latency[i]); if (debug) { - printf("\tchild %d, %d parents: ", i, child->parent_count); + fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count); bv->dump_instruction(child->inst); } child->cand_generation = cand_generation; - child->parent_count--; - if (child->parent_count == 0) { + child->parent_count--; + if (child->parent_count == 0) { if (debug) { - printf("\t\tnow available\n"); + fprintf(stderr, "\t\tnow available\n"); } - instructions.push_head(child); - } + instructions.push_head(child); + } } cand_generation++; @@ -1346,28 +1442,26 @@ instruction_scheduler::schedule_instructions(backend_instruction *next_block_hea * the next math instruction isn't going to make progress until the first * is done. */ - if (chosen->inst->is_math()) { - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; - - if (n->inst->is_math()) - n->unblocked_time = MAX2(n->unblocked_time, - time + chosen->latency); - } + if (brw->gen < 6 && chosen->inst->is_math()) { + foreach_in_list(schedule_node, n, &instructions) { + if (n->inst->is_math()) + n->unblocked_time = MAX2(n->unblocked_time, + time + chosen->latency); + } } } + if (block->end()->opcode == BRW_OPCODE_NOP) + block->end()->remove(block); assert(instructions_to_schedule == 0); } void -instruction_scheduler::run(exec_list *all_instructions) +instruction_scheduler::run(cfg_t *cfg) { - backend_instruction *next_block_header = - (backend_instruction *)all_instructions->head; - if (debug) { - printf("\nInstructions before scheduling (reg_alloc %d)\n", post_reg_alloc); + fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n", + post_reg_alloc); bv->dump_instructions(); } @@ -1375,51 +1469,47 @@ instruction_scheduler::run(exec_list *all_instructions) * scheduling. */ if (remaining_grf_uses) { - foreach_list(node, all_instructions) { - count_remaining_grf_uses((backend_instruction *)node); + foreach_block_and_inst(block, backend_instruction, inst, cfg) { + count_remaining_grf_uses(inst); } } - while (!next_block_header->is_tail_sentinel()) { - /* Add things to be scheduled until we get to a new BB. 
*/ - while (!next_block_header->is_tail_sentinel()) { - backend_instruction *inst = next_block_header; - next_block_header = (backend_instruction *)next_block_header->next; + foreach_block(block, cfg) { + if (block->end_ip - block->start_ip <= 1) + continue; + + add_insts_from_block(block); - add_inst(inst); - if (inst->is_control_flow()) - break; - } calculate_deps(); - foreach_list(node, &instructions) { - schedule_node *n = (schedule_node *)node; + foreach_in_list(schedule_node, n, &instructions) { compute_delay(n); } - schedule_instructions(next_block_header); + schedule_instructions(block); } if (debug) { - printf("\nInstructions after scheduling (reg_alloc %d)\n", post_reg_alloc); + fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n", + post_reg_alloc); bv->dump_instructions(); } } void -fs_visitor::schedule_instructions(bool post_reg_alloc) +fs_visitor::schedule_instructions(instruction_scheduler_mode mode) { int grf_count; - if (post_reg_alloc) + if (mode == SCHEDULE_POST) grf_count = grf_used; else grf_count = virtual_grf_count; - fs_instruction_scheduler sched(this, grf_count, post_reg_alloc); - sched.run(&instructions); + fs_instruction_scheduler sched(this, grf_count, mode); + sched.run(cfg); - if (unlikely(INTEL_DEBUG & DEBUG_WM) && post_reg_alloc) { - printf("fs%d estimated execution time: %d cycles\n", + if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) { + fprintf(stderr, "fs%d estimated execution time: %d cycles\n", dispatch_width, sched.time); } @@ -1430,11 +1520,11 @@ void vec4_visitor::opt_schedule_instructions() { vec4_instruction_scheduler sched(this, prog_data->total_grf); - sched.run(&instructions); + sched.run(cfg); if (unlikely(debug_flag)) { - printf("vec4 estimated execution time: %d cycles\n", sched.time); + fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time); } - this->live_intervals_valid = false; + invalidate_live_intervals(); }
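
Note on driving the new entry points: the hunks above replace the old bool post_reg_alloc with an instruction_scheduler_mode, giving three behaviours: a latency-oriented pre-allocation schedule (SCHEDULE_PRE), a pressure-reducing LIFO variant (SCHEDULE_PRE_LIFO, the only mode that takes the cand_generation path in choose_instruction_to_schedule()), and the post-allocation pass (SCHEDULE_POST). The sketch below is illustrative only: the mode names and fs_visitor::schedule_instructions(instruction_scheduler_mode) are taken from this patch, but the surrounding compile/retry flow and the assign_regs(bool allow_spilling) signature are assumptions about the caller, not code contained in this diff.

   #include "brw_fs.h"

   /* Illustrative sketch, not part of this patch: one plausible way a
    * compile loop could use the three scheduling modes introduced above.
    * assign_regs() is assumed to return false when allocation fails.
    */
   static void
   schedule_and_allocate_sketch(fs_visitor *v)
   {
      /* Pre-RA: schedule for latency first; SCHEDULE_PRE uses the same
       * "oldest unblocked instruction" policy as the post-RA path.
       */
      v->schedule_instructions(SCHEDULE_PRE);

      if (!v->assign_regs(false /* allow_spilling */)) {
         /* Register pressure too high: reschedule with the LIFO heuristic,
          * which prefers instructions that just became candidates and so
          * tends to shorten live ranges, then allocate again with spilling
          * allowed.
          */
         v->schedule_instructions(SCHEDULE_PRE_LIFO);
         v->assign_regs(true /* allow_spilling */);
      }

      /* Post-RA: reschedule against hardware GRF numbers purely to hide
       * latency; register pressure no longer matters at this point.
       */
      v->schedule_instructions(SCHEDULE_POST);
   }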