X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_vec4.cpp;h=0a68413bfba0114e0d5f7331fd4d6d10bb2f3073;hb=7bfbaf4a5ab580a8661ea99059cb48c64a016ab6;hp=acf0b6390bcdfcb2f694ac3e45911fa934e51811;hpb=20a849b4aa63c7fce96b04de674a4c70f054ed9c;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/brw_vec4.cpp b/src/mesa/drivers/dri/i965/brw_vec4.cpp index acf0b6390bc..0a68413bfba 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4.cpp @@ -22,6 +22,7 @@ */ #include "brw_vec4.h" +#include "brw_fs.h" #include "brw_cfg.h" #include "brw_vs.h" #include "brw_dead_control_flow.h" @@ -112,6 +113,27 @@ src_reg::src_reg(int32_t i) this->fixed_hw_reg.dw1.d = i; } +src_reg::src_reg(uint8_t vf[4]) +{ + init(); + + this->file = IMM; + this->type = BRW_REGISTER_TYPE_VF; + memcpy(&this->fixed_hw_reg.dw1.ud, vf, sizeof(unsigned)); +} + +src_reg::src_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3) +{ + init(); + + this->file = IMM; + this->type = BRW_REGISTER_TYPE_VF; + this->fixed_hw_reg.dw1.ud = (vf0 << 0) | + (vf1 << 8) | + (vf2 << 16) | + (vf3 << 24); +} + src_reg::src_reg(struct brw_reg reg) { init(); @@ -212,6 +234,22 @@ dst_reg::dst_reg(src_reg reg) this->fixed_hw_reg = reg.fixed_hw_reg; } +bool +dst_reg::equals(const dst_reg &r) const +{ + return (file == r.file && + reg == r.reg && + reg_offset == r.reg_offset && + type == r.type && + negate == r.negate && + abs == r.abs && + writemask == r.writemask && + (reladdr == r.reladdr || + (reladdr && r.reladdr && reladdr->equals(*r.reladdr))) && + memcmp(&fixed_hw_reg, &r.fixed_hw_reg, + sizeof(fixed_hw_reg)) == 0); +} + bool vec4_instruction::is_send_from_grf() { @@ -224,6 +262,24 @@ vec4_instruction::is_send_from_grf() } } +unsigned +vec4_instruction::regs_read(unsigned arg) const +{ + if (src[arg].file == BAD_FILE) + return 0; + + switch (opcode) { + case SHADER_OPCODE_SHADER_TIME_ADD: + return arg == 0 ? mlen : 1; + + case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7: + return arg == 1 ? 
mlen : 1; + + default: + return 1; + } +} + bool vec4_instruction::can_do_source_mods(struct brw_context *brw) { @@ -249,7 +305,7 @@ vec4_instruction::can_do_source_mods(struct brw_context *brw) int vec4_visitor::implied_mrf_writes(vec4_instruction *inst) { - if (inst->mlen == 0) + if (inst->mlen == 0 || inst->is_send_from_grf()) return 0; switch (inst->opcode) { @@ -274,8 +330,11 @@ vec4_visitor::implied_mrf_writes(vec4_instruction *inst) case SHADER_OPCODE_GEN4_SCRATCH_WRITE: return 3; case GS_OPCODE_URB_WRITE: + case GS_OPCODE_URB_WRITE_ALLOCATE: case GS_OPCODE_THREAD_END: return 0; + case GS_OPCODE_FF_SYNC: + return 1; case SHADER_OPCODE_SHADER_TIME_ADD: return 0; case SHADER_OPCODE_TEX: @@ -311,6 +370,72 @@ src_reg::equals(const src_reg &r) const sizeof(fixed_hw_reg)) == 0); } +bool +vec4_visitor::opt_vector_float() +{ + bool progress = false; + + int last_reg = -1, last_reg_offset = -1; + enum register_file last_reg_file = BAD_FILE; + + int remaining_channels = 0; + uint8_t imm[4]; + int inst_count = 0; + vec4_instruction *imm_inst[4]; + + foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) { + if (last_reg != inst->dst.reg || + last_reg_offset != inst->dst.reg_offset || + last_reg_file != inst->dst.file) { + last_reg = inst->dst.reg; + last_reg_offset = inst->dst.reg_offset; + last_reg_file = inst->dst.file; + remaining_channels = WRITEMASK_XYZW; + + inst_count = 0; + } + + if (inst->opcode != BRW_OPCODE_MOV || + inst->dst.writemask == WRITEMASK_XYZW || + inst->src[0].file != IMM) + continue; + + int vf = brw_float_to_vf(inst->src[0].fixed_hw_reg.dw1.f); + if (vf == -1) + continue; + + if ((inst->dst.writemask & WRITEMASK_X) != 0) + imm[0] = vf; + if ((inst->dst.writemask & WRITEMASK_Y) != 0) + imm[1] = vf; + if ((inst->dst.writemask & WRITEMASK_Z) != 0) + imm[2] = vf; + if ((inst->dst.writemask & WRITEMASK_W) != 0) + imm[3] = vf; + + imm_inst[inst_count++] = inst; + + remaining_channels &= ~inst->dst.writemask; + if (remaining_channels == 0) { + vec4_instruction *mov = MOV(inst->dst, imm); + mov->dst.type = BRW_REGISTER_TYPE_F; + mov->dst.writemask = WRITEMASK_XYZW; + inst->insert_after(block, mov); + last_reg = -1; + + for (int i = 0; i < inst_count; i++) { + imm_inst[i]->remove(block); + } + progress = true; + } + } + + if (progress) + invalidate_live_intervals(); + + return progress; +} + /* Replaces unused channels of a swizzle with channels that are used. * * For instance, this pass transforms @@ -329,14 +454,21 @@ vec4_visitor::opt_reduce_swizzle() { bool progress = false; - foreach_in_list_safe(vec4_instruction, inst, &instructions) { - if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG) + foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) { + if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG || + inst->is_send_from_grf()) continue; int swizzle[4]; /* Determine which channels of the sources are read. */ switch (inst->opcode) { + case VEC4_OPCODE_PACK_BYTES: + swizzle[0] = 0; + swizzle[1] = 1; + swizzle[2] = 2; + swizzle[3] = 3; + break; case BRW_OPCODE_DP4: case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0, * but all four of src1. 
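/* Illustrative sketch, not part of this commit: the four-argument src_reg
 * constructor added above packs four already-encoded 8-bit VF values into one
 * 32-bit immediate (one byte per channel), and opt_vector_float() relies on
 * brw_float_to_vf() returning -1 for floats that have no VF encoding.  A
 * minimal standalone illustration of that packing; pack_vf_imm() is a
 * hypothetical helper, not a driver function.
 */
#include <stdint.h>

static inline uint32_t
pack_vf_imm(uint8_t vf_x, uint8_t vf_y, uint8_t vf_z, uint8_t vf_w)
{
   /* Same byte layout as src_reg(uint8_t, uint8_t, uint8_t, uint8_t):
    * X in bits 0..7, Y in 8..15, Z in 16..23, W in 24..31.
    */
   return ((uint32_t)vf_x << 0)  |
          ((uint32_t)vf_y << 8)  |
          ((uint32_t)vf_z << 16) |
          ((uint32_t)vf_w << 24);
}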
@@ -402,157 +534,6 @@ vec4_visitor::opt_reduce_swizzle() } } - if (progress) - invalidate_live_intervals(false); - - return progress; -} - -static bool -try_eliminate_instruction(vec4_instruction *inst, int new_writemask, - const struct brw_context *brw) -{ - if (inst->has_side_effects()) - return false; - - if (new_writemask == 0) { - /* Don't dead code eliminate instructions that write to the - * accumulator as a side-effect. Instead just set the destination - * to the null register to free it. - */ - if (inst->writes_accumulator || inst->writes_flag()) { - inst->dst = dst_reg(retype(brw_null_reg(), inst->dst.type)); - } else { - inst->remove(); - } - - return true; - } else if (inst->dst.writemask != new_writemask) { - switch (inst->opcode) { - case SHADER_OPCODE_TXF_CMS: - case SHADER_OPCODE_GEN4_SCRATCH_READ: - case VS_OPCODE_PULL_CONSTANT_LOAD: - case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7: - break; - default: - /* Do not set a writemask on Gen6 for math instructions, those are - * executed using align1 mode that does not support a destination mask. - */ - if (!(brw->gen == 6 && inst->is_math()) && !inst->is_tex()) { - inst->dst.writemask = new_writemask; - return true; - } - } - } - - return false; -} - -/** - * Must be called after calculate_live_intervals() to remove unused - * writes to registers -- register allocation will fail otherwise - * because something deffed but not used won't be considered to - * interfere with other regs. - */ -bool -vec4_visitor::dead_code_eliminate() -{ - bool progress = false; - int pc = -1; - - calculate_live_intervals(); - - foreach_in_list_safe(vec4_instruction, inst, &instructions) { - pc++; - - bool inst_writes_flag = false; - if (inst->dst.file != GRF) { - if (inst->dst.is_null() && inst->writes_flag()) { - inst_writes_flag = true; - } else { - continue; - } - } - - if (inst->dst.file == GRF) { - int write_mask = inst->dst.writemask; - - for (int c = 0; c < 4; c++) { - if (write_mask & (1 << c)) { - assert(this->virtual_grf_end[inst->dst.reg * 4 + c] >= pc); - if (this->virtual_grf_end[inst->dst.reg * 4 + c] == pc) { - write_mask &= ~(1 << c); - } - } - } - - progress = try_eliminate_instruction(inst, write_mask, brw) || - progress; - } - - if (inst->predicate || inst->prev == NULL) - continue; - - int dead_channels; - if (inst_writes_flag) { -/* Arbitrarily chosen, other than not being an xyzw writemask. */ -#define FLAG_WRITEMASK (1 << 5) - dead_channels = inst->reads_flag() ? 
0 : FLAG_WRITEMASK; - } else { - dead_channels = inst->dst.writemask; - - for (int i = 0; i < 3; i++) { - if (inst->src[i].file != GRF || - inst->src[i].reg != inst->dst.reg) - continue; - - for (int j = 0; j < 4; j++) { - int swiz = BRW_GET_SWZ(inst->src[i].swizzle, j); - dead_channels &= ~(1 << swiz); - } - } - } - - for (exec_node *node = inst->prev, *prev = node->prev; - prev != NULL && dead_channels != 0; - node = prev, prev = prev->prev) { - vec4_instruction *scan_inst = (vec4_instruction *)node; - - if (scan_inst->is_control_flow()) - break; - - if (inst_writes_flag) { - if (scan_inst->dst.is_null() && scan_inst->writes_flag()) { - scan_inst->remove(); - progress = true; - continue; - } else if (scan_inst->reads_flag()) { - break; - } - } - - if (inst->dst.file == scan_inst->dst.file && - inst->dst.reg == scan_inst->dst.reg && - inst->dst.reg_offset == scan_inst->dst.reg_offset) { - int new_writemask = scan_inst->dst.writemask & ~dead_channels; - - progress = try_eliminate_instruction(scan_inst, new_writemask, brw) || - progress; - } - - for (int i = 0; i < 3; i++) { - if (scan_inst->src[i].file != inst->dst.file || - scan_inst->src[i].reg != inst->dst.reg) - continue; - - for (int j = 0; j < 4; j++) { - int swiz = BRW_GET_SWZ(scan_inst->src[i].swizzle, j); - dead_channels &= ~(1 << swiz); - } - } - } - } - if (progress) invalidate_live_intervals(); @@ -569,7 +550,7 @@ vec4_visitor::split_uniform_registers() * vector. The goal is to make elimination of unused uniform * components easier later. */ - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { for (int i = 0 ; i < 3; i++) { if (inst->src[i].file != UNIFORM) continue; @@ -602,7 +583,7 @@ vec4_visitor::pack_uniform_registers() * expect unused vector elements when we've moved array access out * to pull constants, and from some GLSL code generators like wine. */ - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { for (int i = 0 ; i < 3; i++) { if (inst->src[i].file != UNIFORM) continue; @@ -655,7 +636,7 @@ vec4_visitor::pack_uniform_registers() this->uniforms = new_uniform_count; /* Now, update the instructions for our repacked uniforms. 
*/ - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { for (int i = 0 ; i < 3; i++) { int src = inst->src[i].reg; @@ -690,8 +671,31 @@ vec4_visitor::opt_algebraic() { bool progress = false; - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { switch (inst->opcode) { + case BRW_OPCODE_MOV: + if (inst->src[0].file != IMM) + break; + + if (inst->saturate) { + if (inst->dst.type != inst->src[0].type) + assert(!"unimplemented: saturate mixed types"); + + if (brw_saturate_immediate(inst->dst.type, + &inst->src[0].fixed_hw_reg)) { + inst->saturate = false; + progress = true; + } + } + break; + + case VEC4_OPCODE_UNPACK_UNIFORM: + if (inst->src[0].file != UNIFORM) { + inst->opcode = BRW_OPCODE_MOV; + progress = true; + } + break; + case BRW_OPCODE_ADD: if (inst->src[1].is_zero()) { inst->opcode = BRW_OPCODE_MOV; @@ -722,8 +726,36 @@ vec4_visitor::opt_algebraic() inst->opcode = BRW_OPCODE_MOV; inst->src[1] = src_reg(); progress = true; + } else if (inst->src[1].is_negative_one()) { + inst->opcode = BRW_OPCODE_MOV; + inst->src[0].negate = !inst->src[0].negate; + inst->src[1] = src_reg(); + progress = true; } break; + case BRW_OPCODE_CMP: + if (inst->conditional_mod == BRW_CONDITIONAL_GE && + inst->src[0].abs && + inst->src[0].negate && + inst->src[1].is_zero()) { + inst->src[0].abs = false; + inst->src[0].negate = false; + inst->conditional_mod = BRW_CONDITIONAL_Z; + progress = true; + break; + } + break; + case SHADER_OPCODE_RCP: { + vec4_instruction *prev = (vec4_instruction *)inst->prev; + if (prev->opcode == SHADER_OPCODE_SQRT) { + if (inst->src[0].equals(src_reg(prev->dst))) { + inst->opcode = SHADER_OPCODE_RSQ; + inst->src[0] = prev->src[0]; + progress = true; + } + } + break; + } default: break; } @@ -798,7 +830,7 @@ vec4_visitor::move_push_constants_to_pull_constants() /* Now actually rewrite usage of the things we've moved to pull * constants. */ - foreach_in_list_safe(vec4_instruction, inst, &instructions) { + foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) { for (int i = 0 ; i < 3; i++) { if (inst->src[i].file != UNIFORM || pull_constant_loc[inst->src[i].reg] == -1) @@ -808,7 +840,7 @@ vec4_visitor::move_push_constants_to_pull_constants() dst_reg temp = dst_reg(this, glsl_type::vec4_type); - emit_pull_constant_load(inst, temp, inst->src[i], + emit_pull_constant_load(block, inst, temp, inst->src[i], pull_constant_loc[uniform]); inst->src[i].file = temp.file; @@ -822,6 +854,53 @@ vec4_visitor::move_push_constants_to_pull_constants() pack_uniform_registers(); } +/* Conditions for which we want to avoid setting the dependency control bits */ +bool +vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst) +{ +#define IS_DWORD(reg) \ + (reg.type == BRW_REGISTER_TYPE_UD || \ + reg.type == BRW_REGISTER_TYPE_D) + + /* "When source or destination datatype is 64b or operation is integer DWord + * multiply, DepCtrl must not be used." + * May apply to future SoCs as well. + */ + if (brw->is_cherryview) { + if (inst->opcode == BRW_OPCODE_MUL && + IS_DWORD(inst->src[0]) && + IS_DWORD(inst->src[1])) + return true; + } +#undef IS_DWORD + + if (brw->gen >= 8) { + if (inst->opcode == BRW_OPCODE_F32TO16) + return true; + } + + /* + * mlen: + * In the presence of send messages, totally interrupt dependency + * control. They're long enough that the chance of dependency + * control around them just doesn't matter. 
+ * + * predicate: + * From the Ivy Bridge PRM, volume 4 part 3.7, page 80: + * When a sequence of NoDDChk and NoDDClr are used, the last instruction that + * completes the scoreboard clear must have a non-zero execution mask. This + * means, if any kind of predication can change the execution mask or channel + * enable of the last instruction, the optimization must be avoided. This is + * to avoid instructions being shot down the pipeline when no writes are + * required. + * + * math: + * Dependency control does not work well over math instructions. + * NB: Discovered empirically + */ + return (inst->mlen || inst->predicate || inst->is_math()); +} + /** * Sets the dependency control fields on instructions after register * allocation and before the generator is run. @@ -845,8 +924,6 @@ vec4_visitor::opt_set_dependency_control() vec4_instruction *last_mrf_write[BRW_MAX_GRF]; uint8_t mrf_channels_written[BRW_MAX_GRF]; - calculate_cfg(); - assert(prog_data->total_grf || !"Must be called after register allocation"); @@ -869,28 +946,7 @@ vec4_visitor::opt_set_dependency_control() assert(inst->src[i].file != MRF); } - /* In the presence of send messages, totally interrupt dependency - * control. They're long enough that the chance of dependency - * control around them just doesn't matter. - */ - if (inst->mlen) { - memset(last_grf_write, 0, sizeof(last_grf_write)); - memset(last_mrf_write, 0, sizeof(last_mrf_write)); - continue; - } - - /* It looks like setting dependency control on a predicated - * instruction hangs the GPU. - */ - if (inst->predicate) { - memset(last_grf_write, 0, sizeof(last_grf_write)); - memset(last_mrf_write, 0, sizeof(last_mrf_write)); - continue; - } - - /* Dependency control does not work well over math instructions. - */ - if (inst->is_math()) { + if (is_dep_ctrl_unsafe(inst)) { memset(last_grf_write, 0, sizeof(last_grf_write)); memset(last_mrf_write, 0, sizeof(last_mrf_write)); continue; @@ -933,9 +989,9 @@ vec4_visitor::opt_set_dependency_control() } bool -vec4_instruction::can_reswizzle_dst(int dst_writemask, - int swizzle, - int swizzle_mask) +vec4_instruction::can_reswizzle(int dst_writemask, + int swizzle, + int swizzle_mask) { /* If this instruction sets anything not referenced by swizzle, then we'd * totally break it when we reswizzle. @@ -943,30 +999,10 @@ vec4_instruction::can_reswizzle_dst(int dst_writemask, if (dst.writemask & ~swizzle_mask) return false; - switch (opcode) { - default: - if (!brw_is_single_value_swizzle(swizzle)) { - /* Check if there happens to be no reswizzling required. */ - for (int c = 0; c < 4; c++) { - int bit = 1 << BRW_GET_SWZ(swizzle, c); - /* Skip components of the swizzle not used by the dst. */ - if (!(dst_writemask & (1 << c))) - continue; - - /* We don't do the reswizzling yet, so just sanity check that we - * don't have to. - */ - if (bit != (1 << c)) - return false; - } - return true; - } - /* fallthrough */ - case BRW_OPCODE_DP4: - case BRW_OPCODE_DP3: - case BRW_OPCODE_DP2: - return true; - } + if (mlen > 0) + return false; + + return true; } /** @@ -977,43 +1013,45 @@ vec4_instruction::can_reswizzle_dst(int dst_writemask, * e.g. 
for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x */ void -vec4_instruction::reswizzle_dst(int dst_writemask, int swizzle) +vec4_instruction::reswizzle(int dst_writemask, int swizzle) { int new_writemask = 0; + int new_swizzle[4] = { 0 }; - switch (opcode) { - default: - if (!brw_is_single_value_swizzle(swizzle)) { - for (int c = 0; c < 4; c++) { - /* Skip components of the swizzle not used by the dst. */ - if (!(dst_writemask & (1 << c))) - continue; - - /* We don't do the reswizzling yet, so just sanity check that we - * don't have to. - */ - assert((1 << BRW_GET_SWZ(swizzle, c)) == (1 << c)); - } - break; - } - /* fallthrough */ - case BRW_OPCODE_DP4: - case BRW_OPCODE_DP3: - case BRW_OPCODE_DP2: - for (int c = 0; c < 4; c++) { - int bit = 1 << BRW_GET_SWZ(swizzle, c); - /* Skip components of the swizzle not used by the dst. */ - if (!(dst_writemask & (1 << c))) + /* Dot product instructions write a single result into all channels. */ + if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH && + opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2) { + for (int i = 0; i < 3; i++) { + if (src[i].file == BAD_FILE || src[i].file == IMM) continue; - /* If we were populating this component, then populate the - * corresponding channel of the new dst. + + /* Destination write mask doesn't correspond to source swizzle for the + * pack_bytes instruction. */ - if (dst.writemask & bit) - new_writemask |= (1 << c); + if (opcode == VEC4_OPCODE_PACK_BYTES) + continue; + + for (int c = 0; c < 4; c++) { + new_swizzle[c] = BRW_GET_SWZ(src[i].swizzle, BRW_GET_SWZ(swizzle, c)); + } + + src[i].swizzle = BRW_SWIZZLE4(new_swizzle[0], new_swizzle[1], + new_swizzle[2], new_swizzle[3]); } - dst.writemask = new_writemask; - break; } + + for (int c = 0; c < 4; c++) { + int bit = 1 << BRW_GET_SWZ(swizzle, c); + /* Skip components of the swizzle not used by the dst. */ + if (!(dst_writemask & (1 << c))) + continue; + /* If we were populating this component, then populate the + * corresponding channel of the new dst. + */ + if (dst.writemask & bit) + new_writemask |= (1 << c); + } + dst.writemask = new_writemask; } /* @@ -1078,10 +1116,11 @@ vec4_visitor::opt_register_coalesce() * everything writing to the temporary to write into the destination * instead. */ - vec4_instruction *scan_inst; - for (scan_inst = (vec4_instruction *)inst->prev; - scan_inst->prev != NULL; - scan_inst = (vec4_instruction *)scan_inst->prev) { + vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev; + foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst, + inst, block) { + _scan_inst = scan_inst; + if (scan_inst->dst.file == GRF && scan_inst->dst.reg == inst->src[0].reg && scan_inst->dst.reg_offset == inst->src[0].reg_offset) { @@ -1102,9 +1141,9 @@ vec4_visitor::opt_register_coalesce() } /* If we can't handle the swizzle, bail. */ - if (!scan_inst->can_reswizzle_dst(inst->dst.writemask, - inst->src[0].swizzle, - swizzle_mask)) { + if (!scan_inst->can_reswizzle(inst->dst.writemask, + inst->src[0].swizzle, + swizzle_mask)) { break; } @@ -1123,16 +1162,6 @@ vec4_visitor::opt_register_coalesce() break; } - /* We don't handle flow control here. Most computation of values - * that could be coalesced happens just before their use. 
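/* Illustrative sketch, not part of this commit: reswizzle() above now composes
 * the coalesced MOV's source swizzle with each source swizzle of the
 * generating instruction via BRW_GET_SWZ()/BRW_SWIZZLE4().  A standalone
 * illustration of that composition, assuming the usual two-bits-per-channel
 * swizzle encoding; get_chan(), make_swz() and compose_swizzles() are
 * hypothetical stand-ins, not driver functions.
 */
static inline unsigned
get_chan(unsigned swz, unsigned c)
{
   return (swz >> (2 * c)) & 0x3;               /* like BRW_GET_SWZ(swz, c) */
}

static inline unsigned
make_swz(unsigned x, unsigned y, unsigned z, unsigned w)
{
   return x | (y << 2) | (z << 4) | (w << 6);   /* like BRW_SWIZZLE4() */
}

static inline unsigned
compose_swizzles(unsigned inner, unsigned outer)
{
   /* Result channel c reads whatever channel 'outer' selects from 'inner',
    * i.e. new_swizzle[c] = inner[outer[c]], matching the loop in reswizzle().
    */
   return make_swz(get_chan(inner, get_chan(outer, 0)),
                   get_chan(inner, get_chan(outer, 1)),
                   get_chan(inner, get_chan(outer, 2)),
                   get_chan(inner, get_chan(outer, 3)));
}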
- */ - if (scan_inst->opcode == BRW_OPCODE_DO || - scan_inst->opcode == BRW_OPCODE_WHILE || - scan_inst->opcode == BRW_OPCODE_ELSE || - scan_inst->opcode == BRW_OPCODE_ENDIF) { - break; - } - /* You can't read from an MRF, so if someone else reads our MRF's * source GRF that we wanted to rewrite, that stops us. If it's a * GRF we're trying to coalesce to, we don't actually handle @@ -1185,13 +1214,13 @@ vec4_visitor::opt_register_coalesce() * computing the value. Now go rewrite the instruction stream * between the two. */ - + vec4_instruction *scan_inst = _scan_inst; while (scan_inst != inst) { if (scan_inst->dst.file == GRF && scan_inst->dst.reg == inst->src[0].reg && scan_inst->dst.reg_offset == inst->src[0].reg_offset) { - scan_inst->reswizzle_dst(inst->dst.writemask, - inst->src[0].swizzle); + scan_inst->reswizzle(inst->dst.writemask, + inst->src[0].swizzle); scan_inst->dst.file = inst->dst.file; scan_inst->dst.reg = inst->dst.reg; scan_inst->dst.reg_offset = inst->dst.reg_offset; @@ -1205,7 +1234,7 @@ vec4_visitor::opt_register_coalesce() } if (progress) - invalidate_live_intervals(false); + invalidate_live_intervals(); return progress; } @@ -1225,7 +1254,7 @@ vec4_visitor::opt_register_coalesce() void vec4_visitor::split_virtual_grfs() { - int num_vars = this->virtual_grf_count; + int num_vars = this->alloc.count; int new_virtual_grf[num_vars]; bool split_grf[num_vars]; @@ -1233,13 +1262,13 @@ vec4_visitor::split_virtual_grfs() /* Try to split anything > 0 sized. */ for (int i = 0; i < num_vars; i++) { - split_grf[i] = this->virtual_grf_sizes[i] != 1; + split_grf[i] = this->alloc.sizes[i] != 1; } /* Check that the instructions are compatible with the registers we're trying * to split. */ - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { /* If there's a SEND message loading from a GRF on gen7+, it needs to be * contiguous. */ @@ -1259,16 +1288,16 @@ vec4_visitor::split_virtual_grfs() if (!split_grf[i]) continue; - new_virtual_grf[i] = virtual_grf_alloc(1); - for (int j = 2; j < this->virtual_grf_sizes[i]; j++) { - int reg = virtual_grf_alloc(1); + new_virtual_grf[i] = alloc.allocate(1); + for (unsigned j = 2; j < this->alloc.sizes[i]; j++) { + unsigned reg = alloc.allocate(1); assert(reg == new_virtual_grf[i] + j - 1); (void) reg; } - this->virtual_grf_sizes[i] = 1; + this->alloc.sizes[i] = 1; } - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { if (inst->dst.file == GRF && split_grf[inst->dst.reg] && inst->dst.reg_offset != 0) { inst->dst.reg = (new_virtual_grf[inst->dst.reg] + @@ -1284,7 +1313,7 @@ vec4_visitor::split_virtual_grfs() } } } - invalidate_live_intervals(false); + invalidate_live_intervals(); } void @@ -1299,13 +1328,20 @@ vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file) vec4_instruction *inst = (vec4_instruction *)be_inst; if (inst->predicate) { - fprintf(file, "(%cf0) ", - inst->predicate_inverse ? '-' : '+'); + fprintf(file, "(%cf0.%d) ", + inst->predicate_inverse ? 
'-' : '+', + inst->flag_subreg); } fprintf(file, "%s", brw_instruction_name(inst->opcode)); if (inst->conditional_mod) { fprintf(file, "%s", conditional_modifier[inst->conditional_mod]); + if (!inst->predicate && + (brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL && + inst->opcode != BRW_OPCODE_IF && + inst->opcode != BRW_OPCODE_WHILE))) { + fprintf(file, ".f0.%d", inst->flag_subreg); + } } fprintf(file, " "); @@ -1361,7 +1397,10 @@ vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file) if (inst->dst.writemask & 8) fprintf(file, "w"); } - fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type)); + fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type)); + + if (inst->src[0].file != BAD_FILE) + fprintf(file, ", "); for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) { if (inst->src[i].negate) @@ -1389,6 +1428,13 @@ vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file) case BRW_REGISTER_TYPE_UD: fprintf(file, "%uU", inst->src[i].fixed_hw_reg.dw1.ud); break; + case BRW_REGISTER_TYPE_VF: + fprintf(file, "[%-gF, %-gF, %-gF, %-gF]", + brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 0) & 0xff), + brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 8) & 0xff), + brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff), + brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff)); + break; default: fprintf(file, "???"); break; @@ -1438,7 +1484,7 @@ vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file) /* Don't print .0; and only VGRFs have reg_offsets and sizes */ if (inst->src[i].reg_offset != 0 && inst->src[i].file == GRF && - virtual_grf_sizes[inst->src[i].reg] != 1) + alloc.sizes[inst->src[i].reg] != 1) fprintf(file, ".%d", inst->src[i].reg_offset); if (inst->src[i].file != IMM) { @@ -1490,7 +1536,7 @@ void vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map, bool interleaved) { - foreach_in_list(vec4_instruction, inst, &instructions) { + foreach_block_and_inst(block, vec4_instruction, inst, cfg) { /* We have to support ATTR as a destination for GL_FIXED fixup. */ if (inst->dst.file == ATTR) { int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset]; @@ -1552,7 +1598,7 @@ vec4_vs_visitor::setup_attributes(int payload_reg) * don't represent it with a flag in inputs_read, so we call it * VERT_ATTRIB_MAX. 
*/ - if (vs_prog_data->uses_vertexid) { + if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) { attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes; nr_attributes++; } @@ -1606,7 +1652,8 @@ vec4_visitor::setup_uniforms(int reg) stage_prog_data->nr_params = this->uniforms * 4; - prog_data->curb_read_length = reg - prog_data->base.dispatch_grf_start_reg; + prog_data->base.curb_read_length = + reg - prog_data->base.dispatch_grf_start_reg; return reg; } @@ -1629,6 +1676,12 @@ vec4_vs_visitor::setup_payload(void) this->first_non_payload_grf = reg; } +void +vec4_visitor::assign_binding_table_offsets() +{ + assign_common_binding_table_offsets(0); +} + src_reg vec4_visitor::get_timestamp() { @@ -1637,6 +1690,8 @@ vec4_visitor::get_timestamp() src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_TIMESTAMP, 0, + 0, + 0, BRW_REGISTER_TYPE_UD, BRW_VERTICAL_STRIDE_0, BRW_WIDTH_4, @@ -1717,7 +1772,9 @@ vec4_visitor::emit_shader_time_write(enum shader_time_shader_type type, time.type = BRW_REGISTER_TYPE_UD; emit(MOV(time, src_reg(value))); - emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst)); + vec4_instruction *inst = + emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst)); + inst->mlen = 2; } bool @@ -1728,7 +1785,7 @@ vec4_visitor::run() if (INTEL_DEBUG & DEBUG_SHADER_TIME) emit_shader_time_begin(); - assign_common_binding_table_offsets(0); + assign_binding_table_offsets(); emit_prolog(); @@ -1747,6 +1804,8 @@ vec4_visitor::run() emit_thread_end(); + calculate_cfg(); + /* Before any optimization, push array accesses out to scratch * space where we need them to be. This pass may allocate new * virtual GRFs, so we want to do it early. It also makes sure @@ -1771,46 +1830,56 @@ vec4_visitor::run() const char *stage_name = stage == MESA_SHADER_GEOMETRY ? "gs" : "vs"; -#define OPT(pass, args...) do { \ +#define OPT(pass, args...) ({ \ pass_num++; \ bool this_progress = pass(args); \ \ if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \ char filename[64]; \ snprintf(filename, 64, "%s-%04d-%02d-%02d-" #pass, \ - stage_name, shader_prog->Name, iteration, pass_num); \ + stage_name, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \ \ backend_visitor::dump_instructions(filename); \ } \ \ progress = progress || this_progress; \ - } while (false) + this_progress; \ + }) if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) { char filename[64]; snprintf(filename, 64, "%s-%04d-00-start", - stage_name, shader_prog->Name); + stage_name, shader_prog ? shader_prog->Name : 0); backend_visitor::dump_instructions(filename); } bool progress; int iteration = 0; + int pass_num = 0; do { progress = false; + pass_num = 0; iteration++; - int pass_num = 0; OPT(opt_reduce_swizzle); OPT(dead_code_eliminate); OPT(dead_control_flow_eliminate, this); OPT(opt_copy_propagation); - OPT(opt_algebraic); OPT(opt_cse); + OPT(opt_algebraic); OPT(opt_register_coalesce); } while (progress); + pass_num = 0; + + if (OPT(opt_vector_float)) { + OPT(opt_cse); + OPT(opt_copy_propagation, false); + OPT(opt_copy_propagation, true); + OPT(dead_code_eliminate); + } if (failed) return false; @@ -1819,9 +1888,9 @@ vec4_visitor::run() if (false) { /* Debug of register spilling: Go spill everything. 
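/* Illustrative sketch, not part of this commit: the OPT() macro above is
 * turned from a do { ... } while (false) statement into a GNU statement
 * expression so that it evaluates to the pass's own progress flag, which is
 * what allows the new "if (OPT(opt_vector_float)) { ... }" cleanup block in
 * run().  A stripped-down illustration of the same pattern, assuming a
 * GNU-compatible compiler; run_pass() and optimize() are hypothetical
 * stand-ins, not driver functions.
 */
#include <stdbool.h>

bool run_pass(void) { return true; }   /* pretend optimization pass */

#define OPT(pass) ({                                        \
   bool this_progress = pass();                             \
   progress = progress || this_progress;                    \
   this_progress; /* value of the whole expression */       \
})

bool
optimize(void)
{
   bool progress = false;
   if (OPT(run_pass)) {
      /* extra cleanup that only makes sense when the pass did something */
   }
   return progress;
}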
*/ - const int grf_count = virtual_grf_count; - float spill_costs[virtual_grf_count]; - bool no_spill[virtual_grf_count]; + const int grf_count = alloc.count; + float spill_costs[alloc.count]; + bool no_spill[alloc.count]; evaluate_spill_costs(spill_costs, no_spill); for (int i = 0; i < grf_count; i++) { if (no_spill[i]) @@ -1846,8 +1915,6 @@ vec4_visitor::run() */ assert(sanity_param_count == prog->Parameters->NumParameters); - calculate_cfg(); - return !failed; } @@ -1870,6 +1937,7 @@ brw_vs_emit(struct brw_context *brw, { bool start_busy = false; double start_time = 0; + const unsigned *assembly = NULL; if (unlikely(brw->perf_debug)) { start_busy = (brw->batch.last_bo && @@ -1882,25 +1950,56 @@ brw_vs_emit(struct brw_context *brw, shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX]; if (unlikely(INTEL_DEBUG & DEBUG_VS)) - brw_dump_ir(brw, "vertex", prog, &shader->base, &c->vp->program.Base); + brw_dump_ir("vertex", prog, &shader->base, &c->vp->program.Base); + + if (prog && brw->gen >= 8 && brw->scalar_vs) { + fs_visitor v(brw, mem_ctx, &c->key, prog_data, prog, &c->vp->program, 8); + if (!v.run_vs()) { + if (prog) { + prog->LinkStatus = false; + ralloc_strcat(&prog->InfoLog, v.fail_msg); + } - vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx); - if (!v.run()) { - if (prog) { - prog->LinkStatus = false; - ralloc_strcat(&prog->InfoLog, v.fail_msg); + _mesa_problem(NULL, "Failed to compile vertex shader: %s\n", + v.fail_msg); + + return NULL; } - _mesa_problem(NULL, "Failed to compile vertex shader: %s\n", - v.fail_msg); + fs_generator g(brw, mem_ctx, (void *) &c->key, &prog_data->base.base, + &c->vp->program.Base, v.runtime_check_aads_emit, "VS"); + if (INTEL_DEBUG & DEBUG_VS) { + char *name = ralloc_asprintf(mem_ctx, "%s vertex shader %d", + prog->Label ? 
prog->Label : "unnamed", + prog->Name); + g.enable_debug(name); + } + g.generate_code(v.cfg, 8); + assembly = g.get_assembly(final_assembly_size); - return NULL; + if (assembly) + prog_data->base.simd8 = true; + c->base.last_scratch = v.last_scratch; } - const unsigned *assembly = NULL; - vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base, - mem_ctx, INTEL_DEBUG & DEBUG_VS); - assembly = g.generate_assembly(v.cfg, final_assembly_size); + if (!assembly) { + vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx); + if (!v.run()) { + if (prog) { + prog->LinkStatus = false; + ralloc_strcat(&prog->InfoLog, v.fail_msg); + } + + _mesa_problem(NULL, "Failed to compile vertex shader: %s\n", + v.fail_msg); + + return NULL; + } + + vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base, + mem_ctx, INTEL_DEBUG & DEBUG_VS, "vertex", "VS"); + assembly = g.generate_assembly(v.cfg, final_assembly_size); + } if (unlikely(brw->perf_debug) && shader) { if (shader->compiled_once) { @@ -1918,16 +2017,17 @@ brw_vs_emit(struct brw_context *brw, void -brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx, - struct brw_vec4_prog_key *key, - GLuint id, struct gl_program *prog) +brw_vue_setup_prog_key_for_precompile(struct gl_context *ctx, + struct brw_vue_prog_key *key, + GLuint id, struct gl_program *prog) { + struct brw_context *brw = brw_context(ctx); key->program_string_id = id; - key->clamp_vertex_color = ctx->API == API_OPENGL_COMPAT; + const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8; unsigned sampler_count = _mesa_fls(prog->SamplersUsed); for (unsigned i = 0; i < sampler_count; i++) { - if (prog->ShadowSamplers & (1 << i)) { + if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) { /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */ key->tex.swizzles[i] = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
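/* Illustrative sketch, not part of this commit: the reworked brw_vs_emit()
 * above first tries the scalar SIMD8 backend (fs_visitor/fs_generator) on
 * Gen8+ when brw->scalar_vs is set, and falls back to the vec4 backend
 * whenever no scalar assembly was produced.  A stripped-down outline of that
 * fallback flow; compile_scalar_vs() and compile_vec4_vs() are hypothetical
 * stand-ins for the two code paths, not driver functions.
 */
#include <stdbool.h>
#include <stddef.h>

static const unsigned *compile_scalar_vs(size_t *size) { *size = 0; return NULL; }
static const unsigned *compile_vec4_vs(size_t *size)   { *size = 0; return NULL; }

const unsigned *
emit_vs(bool gen8_plus, bool scalar_vs, bool *out_simd8, size_t *size)
{
   const unsigned *assembly = NULL;

   if (gen8_plus && scalar_vs) {
      assembly = compile_scalar_vs(size);
      if (assembly)
         *out_simd8 = true;   /* mirrors prog_data->base.simd8 = true */
   }

   if (!assembly)
      assembly = compile_vec4_vs(size);   /* vec4_vs_visitor / vec4_generator path */

   return assembly;
}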