#include "main/hash_table.h"
#include "main/macros.h"
#include "main/shaderobj.h"
-#include "main/uniforms.h"
#include "main/fbobject.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "brw_wm.h"
} /* extern "C" */
#include "brw_fs.h"
+#include "brw_dead_control_flow.h"
+#include "main/uniforms.h"
+#include "brw_fs_live_variables.h"
#include "glsl/glsl_types.h"
void
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
+ALU3(MAD)
+ALU2(ADDC)
+ALU2(SUBB)
+ALU2(SEL)
/** Gen4 predicated IF. */
fs_inst *
return inst;
}
-/** Gen6+ IF with embedded comparison. */
+/** Gen6 IF with embedded comparison. */
fs_inst *
fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition)
{
- assert(intel->gen >= 6);
+ assert(brw->gen == 6);
fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF,
reg_null_d, src0, src1);
inst->conditional_mod = condition;
* mostly work out for float-interpreted-as-int since our comparisons are
* for >0, =0, <0.
*/
- if (intel->gen == 4) {
+ if (brw->gen == 4) {
dst.type = src0.type;
if (dst.file == HW_REG)
dst.fixed_hw_reg.type = dst.type;
varying_offset, const_offset & ~3));
int scale = 1;
- if (intel->gen == 4 && dispatch_width == 8) {
+ if (brw->gen == 4 && dispatch_width == 8) {
/* Pre-gen5, we can either use a SIMD8 message that requires (header,
* u, v, r) as parameters, or we can just use the SIMD16 message
* consisting of (header, u). We choose the second, at the cost of a
}
enum opcode op;
- if (intel->gen >= 7)
+ if (brw->gen >= 7)
op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
else
op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;
inst->regs_written = 4 * scale;
instructions.push_tail(inst);
- if (intel->gen < 7) {
+ if (brw->gen < 7) {
inst->base_mrf = 13;
inst->header_present = true;
- if (intel->gen == 4)
+ if (brw->gen == 4)
inst->mlen = 3;
else
inst->mlen = 1 + dispatch_width / 8;
return (opcode == FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7 ||
opcode == SHADER_OPCODE_SHADER_TIME_ADD ||
(opcode == FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD &&
- src[1].file == GRF));
+ src[1].file == GRF) ||
+ (is_tex() && src[0].file == GRF));
}
bool
fs_visitor::can_do_source_mods(fs_inst *inst)
{
- if (intel->gen == 6 && inst->is_math())
+ if (brw->gen == 6 && inst->is_math())
return false;
if (inst->is_send_from_grf())
return false;
+ if (!inst->can_do_source_mods())
+ return false;
+
return true;
}
this->imm.u = u;
}
-/** Fixed brw_reg Immediate value constructor. */
+/** Fixed brw_reg. */
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
{
init();
imm.u == r.imm.u);
}
+fs_reg
+fs_reg::retype(uint32_t type)
+{
+ fs_reg result = *this;
+ result.type = type;
+ return result;
+}
+
bool
fs_reg::is_zero() const
{
return type == BRW_REGISTER_TYPE_F ? imm.f == 0.0 : imm.i == 0;
}
+bool
+fs_reg::is_null() const
+{
+ return file == HW_REG &&
+ fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ fixed_hw_reg.nr == BRW_ARF_NULL;
+}
+
bool
fs_reg::is_valid_3src() const
{
* link time.
*/
return 0;
+ case GLSL_TYPE_ATOMIC_UINT:
+ return 0;
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
fs_reg
fs_visitor::get_timestamp()
{
- assert(intel->gen >= 7);
+ assert(brw->gen >= 7);
fs_reg ts = fs_reg(retype(brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
BRW_ARF_TIMESTAMP,
assert(force_uncompressed_stack >= 0);
}
-void
-fs_visitor::push_force_sechalf()
-{
- force_sechalf_stack++;
-}
-
-void
-fs_visitor::pop_force_sechalf()
-{
- force_sechalf_stack--;
- assert(force_sechalf_stack >= 0);
-}
-
/**
* Returns true if the instruction has a flag that means it won't
* update an entire destination register.
bool
fs_inst::is_partial_write()
{
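+ /* A predicated SEL still writes every channel (the predicate picks
+ * which source each channel takes), so it is not a partial write.
+ */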
- return (this->predicate ||
+ return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
this->force_uncompressed ||
this->force_sechalf);
}
+int
+fs_inst::regs_read(fs_visitor *v, int arg)
+{
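+ /* mlen is in hardware-register units, and in SIMD16 each reg_offset
+ * of a virtual GRF spans two hardware registers, so round up to
+ * (mlen + 1) / 2 registers read.
+ */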
+ if (is_tex() && arg == 0 && src[0].file == GRF) {
+ if (v->dispatch_width == 16)
+ return (mlen + 1) / 2;
+ else
+ return mlen;
+ }
+ return 1;
+}
+
+bool
+fs_inst::reads_flag()
+{
+ return predicate;
+}
+
+bool
+fs_inst::writes_flag()
+{
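+ /* On SEL the conditional modifier selects a source (e.g. .l picks
+ * the minimum) rather than updating the flag register.
+ */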
+ return (conditional_mod && opcode != BRW_OPCODE_SEL) ||
+ opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
+}
+
/**
* Returns how many MRFs an FS opcode will write over.
*
if (inst->mlen == 0)
return 0;
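+ /* By convention in this backend, base_mrf == -1 marks sends whose
+ * payload is sourced from the GRF, so no MRF writes are implied.
+ */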
+ if (inst->base_mrf == -1)
+ return 0;
+
switch (inst->opcode) {
case SHADER_OPCODE_RCP:
case SHADER_OPCODE_RSQ:
case SHADER_OPCODE_TXD:
case SHADER_OPCODE_TXF:
case SHADER_OPCODE_TXF_MS:
+ case SHADER_OPCODE_TG4:
+ case SHADER_OPCODE_TG4_OFFSET:
case SHADER_OPCODE_TXL:
case SHADER_OPCODE_TXS:
case SHADER_OPCODE_LOD:
case FS_OPCODE_FB_WRITE:
return 2;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
- case FS_OPCODE_UNSPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
return inst->mlen;
- case FS_OPCODE_SPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
return 2;
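+ /* Untyped atomics and surface reads are Gen7+ messages sent from
+ * the GRF, so they use no MRFs.
+ */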
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ return 0;
default:
assert(!"not reached");
return inst->mlen;
wpos.reg_offset++;
/* gl_FragCoord.z */
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
emit(MOV(wpos, fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
} else {
emit(FS_OPCODE_LINTERP, wpos,
bool is_centroid)
{
brw_wm_barycentric_interp_mode barycoord_mode;
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
if (is_centroid) {
if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
int location = ir->location;
for (unsigned int i = 0; i < array_elements; i++) {
for (unsigned int j = 0; j < type->matrix_columns; j++) {
- if (urb_setup[location] == -1) {
+ if (c->prog_data.urb_setup[location] == -1) {
/* If there's no incoming setup data for this slot, don't
* emit interpolation for it.
*/
inst->predicate = BRW_PREDICATE_NORMAL;
inst->predicate_inverse = true;
}
- if (intel->gen < 6) {
+ if (brw->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
}
attr.reg_offset++;
fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
/* The frontfacing comes in as a bit in the thread payload. */
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
emit(BRW_OPCODE_ASR, *reg,
fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
fs_reg(15));
return reg;
}
+void
+fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
+{
+ assert(dst.type == BRW_REGISTER_TYPE_F);
+
+ if (c->key.compute_pos_offset) {
+ /* Convert int_sample_pos to floating point */
+ emit(MOV(dst, int_sample_pos));
+ /* Scale to the range [0, 1] */
+ emit(MUL(dst, dst, fs_reg(1 / 16.0f)));
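+ /* The payload stores each position as a byte in 1/16-pixel units,
+ * so e.g. a byte value of 8 scales to 8 / 16 = 0.5, the pixel
+ * center.
+ */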
+ } else {
+ /* From the ARB_sample_shading specification:
+ * "When rendering to a non-multisample buffer, or if multisample
+ * rasterization is disabled, gl_SamplePosition will always be
+ * (0.5, 0.5)."
+ */
+ emit(MOV(dst, fs_reg(0.5f)));
+ }
+}
+
+fs_reg *
+fs_visitor::emit_samplepos_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 6);
+ assert(ir->type == glsl_type::vec2_type);
+
+ this->current_annotation = "compute sample position";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+ fs_reg pos = *reg;
+ fs_reg int_sample_x = fs_reg(this, glsl_type::int_type);
+ fs_reg int_sample_y = fs_reg(this, glsl_type::int_type);
+
+ /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
+ * mode will be enabled.
+ *
+ * From the Ivy Bridge PRM, volume 2 part 1, page 344:
+ * R31.1:0 Position Offset X/Y for Slot[3:0]
+ * R31.3:2 Position Offset X/Y for Slot[7:4]
+ * .....
+ *
+ * The X, Y sample positions come in as bytes in thread payload. So, read
+ * the positions using vstride=16, width=8, hstride=2.
+ */
+ struct brw_reg sample_pos_reg =
+ stride(retype(brw_vec1_grf(c->sample_pos_reg, 0),
+ BRW_REGISTER_TYPE_B), 16, 8, 2);
+
+ emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+ if (dispatch_width == 16) {
+ int_sample_x.sechalf = true;
+ fs_inst *inst = emit(MOV(int_sample_x,
+ fs_reg(suboffset(sample_pos_reg, 16))));
+ inst->force_sechalf = true;
+ int_sample_x.sechalf = false;
+ }
+ /* Compute gl_SamplePosition.x */
+ compute_sample_position(pos, int_sample_x);
+ pos.reg_offset++;
+ emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+ if (dispatch_width == 16) {
+ int_sample_y.sechalf = true;
+ fs_inst *inst = emit(MOV(int_sample_y,
+ fs_reg(suboffset(sample_pos_reg, 17))));
+ inst->force_sechalf = true;
+ int_sample_y.sechalf = false;
+ }
+ /* Compute gl_SamplePosition.y */
+ compute_sample_position(pos, int_sample_y);
+ return reg;
+}
+
+fs_reg *
+fs_visitor::emit_sampleid_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 6);
+
+ this->current_annotation = "compute sample id";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+
+ if (c->key.compute_sample_id) {
+ fs_reg t1 = fs_reg(this, glsl_type::int_type);
+ fs_reg t2 = fs_reg(this, glsl_type::int_type);
+ t2.type = BRW_REGISTER_TYPE_UW;
+
+ /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
+ * 8x multisampling, subspan 0 will represent sample N (where N
+ * is 0, 2, 4 or 6), and subspan 1 will represent sample N+1 (1, 3,
+ * 5 or 7). We can find the value of N by looking at R0.0 bits 7:6
+ * ("Starting Sample Pair Index (SSPI)") and multiplying by two
+ * (since samples are always delivered in pairs). That is, we
+ * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
+ * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
+ * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
+ * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
+ * populating a temporary variable with the sequence (0, 1, 2, 3),
+ * and then reading from it using vstride=1, width=4, hstride=0.
+ * These computations hold for 4x multisampling as well.
+ */
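+ /* Worked example: if R0.0 bits 7:6 contain 0b10, then SSPI is 2 and
+ * N = 2 * 2 = 4, so in SIMD8 the per-slot ADD below produces sample
+ * IDs (4, 4, 4, 4, 5, 5, 5, 5) across the two subspans.
+ */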
+ emit(BRW_OPCODE_AND, t1,
+ fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
+ fs_reg(brw_imm_d(0xc0)));
+ emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
+ /* This works for both SIMD8 and SIMD16 */
+ emit(MOV(t2, brw_imm_v(0x3210)));
+ /* This special instruction takes care of setting vstride=1,
+ * width=4, hstride=0 of t2 during an ADD instruction.
+ */
+ emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
+ } else {
+ /* As per GL_ARB_sample_shading specification:
+ * "When rendering to a non-multisample buffer, or if multisample
+ * rasterization is disabled, gl_SampleID will always be zero."
+ */
+ emit(BRW_OPCODE_MOV, *reg, fs_reg(0));
+ }
+
+ return reg;
+}
+
fs_reg
fs_visitor::fix_math_operand(fs_reg src)
{
* The hardware ignores source modifiers (negate and abs) on math
* instructions, so we also move to a temp to set those up.
*/
- if (intel->gen == 6 && src.file != UNIFORM && src.file != IMM &&
+ if (brw->gen == 6 && src.file != UNIFORM && src.file != IMM &&
!src.abs && !src.negate)
return src;
/* Gen7 relaxes most of the above restrictions, but still can't use IMM
* operands to math
*/
- if (intel->gen >= 7 && src.file != IMM)
+ if (brw->gen >= 7 && src.file != IMM)
return src;
fs_reg expanded = fs_reg(this, glsl_type::float_type);
* Gen 6 hardware ignores source modifiers (negate and abs) on math
* instructions, so we also move to a temp to set those up.
*/
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
src = fix_math_operand(src);
fs_inst *inst = emit(opcode, dst, src);
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
inst->base_mrf = 2;
inst->mlen = dispatch_width / 8;
}
switch (opcode) {
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
- if (intel->gen >= 7 && dispatch_width == 16)
+ if (brw->gen >= 7 && dispatch_width == 16)
fail("16-wide INTDIV unsupported\n");
break;
case SHADER_OPCODE_POW:
return NULL;
}
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
src0 = fix_math_operand(src0);
src1 = fix_math_operand(src1);
fs_visitor::calculate_urb_setup()
{
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- urb_setup[i] = -1;
+ c->prog_data.urb_setup[i] = -1;
}
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
- if (intel->gen >= 6) {
- for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
- urb_setup[i] = urb_next++;
- }
+ if (brw->gen >= 6) {
+ if (_mesa_bitcount_64(fp->Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) <= 16) {
+ /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
+ * first 16 varying inputs, so we can put them wherever we want.
+ * Just put them in order.
+ *
+ * This is useful because it means that (a) inputs not used by the
+ * fragment shader won't take up valuable register space, and (b) we
+ * won't have to recompile the fragment shader if it gets paired with
+ * a different vertex (or geometry) shader.
+ */
+ for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
+ if (fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
+ BITFIELD64_BIT(i)) {
+ c->prog_data.urb_setup[i] = urb_next++;
+ }
+ }
+ } else {
+ /* We have enough input varyings that the SF/SBE pipeline stage can't
+ * arbitrarily rearrange them to suit our whim; we have to put them
+ * in an order that matches the output of the previous pipeline stage
+ * (geometry or vertex shader).
+ */
+ struct brw_vue_map prev_stage_vue_map;
+ brw_compute_vue_map(brw, &prev_stage_vue_map,
+ c->key.input_slots_valid);
+ int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
+ assert(prev_stage_vue_map.num_slots <= first_slot + 32);
+ for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
+ slot++) {
+ int varying = prev_stage_vue_map.slot_to_varying[slot];
+ /* Note that varying == BRW_VARYING_SLOT_COUNT when a slot is
+ * unused.
+ */
+ if (varying != BRW_VARYING_SLOT_COUNT &&
+ (fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
+ BITFIELD64_BIT(varying))) {
+ c->prog_data.urb_setup[varying] = slot - first_slot;
+ }
+ }
+ urb_next = prev_stage_vue_map.num_slots - first_slot;
}
} else {
/* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
* incremented, mapped or not.
*/
if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
- urb_setup[i] = urb_next;
+ c->prog_data.urb_setup[i] = urb_next;
urb_next++;
}
}
* See compile_sf_prog() for more info.
*/
if (fp->Base.InputsRead & BITFIELD64_BIT(VARYING_SLOT_PNTC))
- urb_setup[VARYING_SLOT_PNTC] = urb_next++;
+ c->prog_data.urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
- /* Each attribute is 4 setup channels, each of which is half a reg. */
- c->prog_data.urb_read_length = urb_next * 2;
+ c->prog_data.num_varying_inputs = urb_next;
}
void
}
}
- this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
+ /* Each attribute is 4 setup channels, each of which is half a reg. */
+ this->first_non_payload_grf =
+ urb_start + c->prog_data.num_varying_inputs * 2;
}
/**
* the send is reading the whole thing.
*/
if (inst->is_send_from_grf()) {
- split_grf[inst->src[0].reg] = false;
+ for (int i = 0; i < 3; i++) {
+ if (inst->src[i].file == GRF) {
+ split_grf[inst->src[i].reg] = false;
+ }
+ }
}
}
}
}
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}
/**
if (remap_table[i] != -1) {
remap_table[i] = new_index;
virtual_grf_sizes[new_index] = virtual_grf_sizes[i];
- if (live_intervals_valid) {
- virtual_grf_start[new_index] = virtual_grf_start[i];
- virtual_grf_end[new_index] = virtual_grf_end[i];
- }
+ invalidate_live_intervals();
++new_index;
}
}
base_ir = inst->ir;
current_annotation = inst->annotation;
- fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg surf_index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg temp = fs_reg(this, glsl_type::float_type);
exec_list list = VARYING_PULL_CONSTANT_LOAD(temp,
surf_index,
assert(!inst->src[i].reladdr);
fs_reg dst = fs_reg(this, glsl_type::float_type);
- fs_reg index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
fs_inst *pull =
new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
break;
}
break;
+ case BRW_OPCODE_OR:
+ if (inst->src[0].equals(inst->src[1])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ progress = true;
+ break;
+ }
+ break;
+ case BRW_OPCODE_SEL:
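+ /* A saturating SEL against an immediate reduces to a saturating MOV
+ * when the immediate cannot affect the clamped result: e.g.
+ * sel.sat.l dst, x, 1.0f computes sat(min(x, 1.0)), which is just
+ * sat(x). Likewise for GE/G with an immediate <= 0.0.
+ */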
+ if (inst->saturate && inst->src[1].file == IMM) {
+ switch (inst->conditional_mod) {
+ case BRW_CONDITIONAL_LE:
+ case BRW_CONDITIONAL_L:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f >= 1.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->conditional_mod = BRW_CONDITIONAL_NONE;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case BRW_CONDITIONAL_GE:
+ case BRW_CONDITIONAL_G:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f <= 0.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->conditional_mod = BRW_CONDITIONAL_NONE;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
default:
break;
}
foreach_list_safe(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
- if (inst->dst.file == GRF) {
- assert(this->virtual_grf_end[inst->dst.reg] >= pc);
- if (this->virtual_grf_end[inst->dst.reg] == pc) {
- inst->remove();
- progress = true;
+ if (inst->dst.file == GRF && !inst->has_side_effects()) {
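+ /* The write is dead only if the live range of every register it
+ * writes ends exactly at this instruction, i.e. nothing later reads
+ * any part of the result.
+ */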
+ bool dead = true;
+
+ for (int i = 0; i < inst->regs_written; i++) {
+ int var = live_intervals->var_from_vgrf[inst->dst.reg];
+ assert(live_intervals->end[var + inst->dst.reg_offset + i] >= pc);
+ if (live_intervals->end[var + inst->dst.reg_offset + i] != pc) {
+ dead = false;
+ break;
+ }
+ }
+
+ if (dead) {
+ /* Don't dead code eliminate instructions that write to the
+ * accumulator as a side-effect. Instead just set the destination
+ * to the null register to free it.
+ */
+ switch (inst->opcode) {
+ case BRW_OPCODE_ADDC:
+ case BRW_OPCODE_SUBB:
+ case BRW_OPCODE_MACH:
+ inst->dst = fs_reg(retype(brw_null_reg(), inst->dst.type));
+ break;
+ default:
+ inst->remove();
+ progress = true;
+ break;
+ }
}
}
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
get_dead_code_hash_entry(ht, inst->dst.reg,
inst->dst.reg_offset);
- if (inst->is_partial_write()) {
- /* For a partial write, we can't remove any previous dead code
- * candidate, since we're just modifying their result, but we can
- * be dead code eliminiated ourselves.
- */
- if (entry) {
- entry->data = inst;
+ if (entry) {
+ if (inst->is_partial_write()) {
+ /* For a partial write, we can't remove any previous dead code
+ * candidate, since we're just modifying their result.
+ */
} else {
- insert_dead_code_hash(ht, inst->dst.reg, inst->dst.reg_offset,
- inst);
- }
- } else {
- if (entry) {
/* We're completely updating a channel, and there was a
* previous write to the channel that wasn't read. Kill it!
*/
fs_inst *inst = (fs_inst *)entry->data;
inst->remove();
progress = true;
- _mesa_hash_table_remove(ht, entry);
}
+ _mesa_hash_table_remove(ht, entry);
+ }
+
+ if (!inst->has_side_effects())
insert_dead_code_hash(ht, inst->dst.reg, inst->dst.reg_offset,
inst);
- }
}
}
}
_mesa_hash_table_destroy(ht, NULL);
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
/**
- * Implements a second type of register coalescing: This one checks if
- * the two regs involved in a raw move don't interfere, in which case
- * they can both by stored in the same place and the MOV removed.
+ * Implements register coalescing: Checks if the two registers involved in a
+ * raw move don't interfere, in which case they can both be stored in the same
+ * place and the MOV removed.
*/
bool
-fs_visitor::register_coalesce_2()
+fs_visitor::register_coalesce()
{
bool progress = false;
inst->src[0].smear != -1 ||
inst->dst.file != GRF ||
inst->dst.type != inst->src[0].type ||
- virtual_grf_sizes[inst->src[0].reg] != 1 ||
- virtual_grf_interferes(inst->dst.reg, inst->src[0].reg)) {
+ virtual_grf_sizes[inst->src[0].reg] != 1) {
continue;
}
+ int var_from = live_intervals->var_from_reg(&inst->src[0]);
+ int var_to = live_intervals->var_from_reg(&inst->dst);
+
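+ /* Interference normally blocks coalescing; the exception is a
+ * self-move (dst already equal to src0), whose removal is safe
+ * regardless.
+ */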
+ if (live_intervals->vars_interfere(var_from, var_to) &&
+ !inst->dst.equals(inst->src[0]))
+ continue;
+
int reg_from = inst->src[0].reg;
assert(inst->src[0].reg_offset == 0);
int reg_to = inst->dst.reg;
}
inst->remove();
-
- /* We don't need to recalculate live intervals inside the loop despite
- * flagging live_intervals_valid because we only use live intervals for
- * the interferes test, and we must have had a situation where the
- * intervals were:
- *
- * from to
- * ^
- * |
- * v
- * ^
- * |
- * v
- *
- * Some register R that might get coalesced with one of these two could
- * only be referencing "to", otherwise "from"'s range would have been
- * longer. R's range could also only start at the end of "to" or later,
- * otherwise it will conflict with "to" when we try to coalesce "to"
- * into Rw anyway.
- */
- live_intervals_valid = false;
-
progress = true;
continue;
}
- return progress;
-}
-
-bool
-fs_visitor::register_coalesce()
-{
- bool progress = false;
- int if_depth = 0;
- int loop_depth = 0;
-
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- /* Make sure that we dominate the instructions we're going to
- * scan for interfering with our coalescing, or we won't have
- * scanned enough to see if anything interferes with our
- * coalescing. We don't dominate the following instructions if
- * we're in a loop or an if block.
- */
- switch (inst->opcode) {
- case BRW_OPCODE_DO:
- loop_depth++;
- break;
- case BRW_OPCODE_WHILE:
- loop_depth--;
- break;
- case BRW_OPCODE_IF:
- if_depth++;
- break;
- case BRW_OPCODE_ENDIF:
- if_depth--;
- break;
- default:
- break;
- }
- if (loop_depth || if_depth)
- continue;
-
- if (inst->opcode != BRW_OPCODE_MOV ||
- inst->is_partial_write() ||
- inst->saturate ||
- inst->dst.file != GRF || (inst->src[0].file != GRF &&
- inst->src[0].file != UNIFORM)||
- inst->dst.type != inst->src[0].type)
- continue;
-
- bool has_source_modifiers = (inst->src[0].abs ||
- inst->src[0].negate ||
- inst->src[0].smear != -1 ||
- inst->src[0].file == UNIFORM);
-
- /* Found a move of a GRF to a GRF. Let's see if we can coalesce
- * them: check for no writes to either one until the exit of the
- * program.
- */
- bool interfered = false;
-
- for (fs_inst *scan_inst = (fs_inst *)inst->next;
- !scan_inst->is_tail_sentinel();
- scan_inst = (fs_inst *)scan_inst->next) {
- if (scan_inst->dst.file == GRF) {
- if (scan_inst->overwrites_reg(inst->dst) ||
- scan_inst->overwrites_reg(inst->src[0])) {
- interfered = true;
- break;
- }
- }
-
- /* The gen6 MATH instruction can't handle source modifiers or
- * unusual register regions, so avoid coalescing those for
- * now. We should do something more specific.
- */
- if (has_source_modifiers && !can_do_source_mods(scan_inst)) {
- interfered = true;
- break;
- }
-
- /* The accumulator result appears to get used for the
- * conditional modifier generation. When negating a UD
- * value, there is a 33rd bit generated for the sign in the
- * accumulator value, so now you can't check, for example,
- * equality with a 32-bit value. See piglit fs-op-neg-uint.
- */
- if (scan_inst->conditional_mod &&
- inst->src[0].negate &&
- inst->src[0].type == BRW_REGISTER_TYPE_UD) {
- interfered = true;
- break;
- }
- }
- if (interfered) {
- continue;
- }
-
- /* Rewrite the later usage to point at the source of the move to
- * be removed.
- */
- for (fs_inst *scan_inst = inst;
- !scan_inst->is_tail_sentinel();
- scan_inst = (fs_inst *)scan_inst->next) {
- for (int i = 0; i < 3; i++) {
- if (scan_inst->src[i].file == GRF &&
- scan_inst->src[i].reg == inst->dst.reg &&
- scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
- fs_reg new_src = inst->src[0];
- if (scan_inst->src[i].abs) {
- new_src.negate = 0;
- new_src.abs = 1;
- }
- new_src.negate ^= scan_inst->src[i].negate;
- scan_inst->src[i] = new_src;
- }
- }
- }
-
- inst->remove();
- progress = true;
- }
-
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
-
bool
fs_visitor::compute_to_mrf()
{
if (scan_inst->mlen)
break;
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
/* gen6 math instructions must have the destination be
* GRF, so no compute-to-MRF for them.
*/
}
}
- if (scan_inst->mlen > 0) {
+ if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
/* Found a SEND instruction, which means that there are
* live values in MRFs from base_mrf to base_mrf +
* scan_inst->mlen - 1. Don't go pushing our MRF write up
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
last_mrf_move[inst->dst.reg] = NULL;
}
- if (inst->mlen > 0) {
+ if (inst->mlen > 0 && inst->base_mrf != -1) {
/* Found a SEND instruction, which will include two or fewer
* implied MRF writes. We could do better here.
*/
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
void
fs_visitor::insert_gen4_send_dependency_workarounds()
{
- if (intel->gen != 4 || intel->is_g4x)
+ if (brw->gen != 4 || brw->is_g4x)
return;
/* Note that we're done with register allocation, so GRF fs_regs always
if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
continue;
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
/* The offset arg before was a vec4-aligned byte offset. We need to
* turn it into a dword offset.
*/
inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
inst->src[1] = payload;
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
} else {
/* Before register allocation, we didn't tell the scheduler about the
* MRF we use. We know it's safe to use this MRF because nothing
if (inst->saturate)
printf(".sat");
if (inst->conditional_mod) {
- printf(".cmod");
+ printf("%s", conditional_modifier[inst->conditional_mod]);
if (!inst->predicate &&
- (intel->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
+ (brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
- printf(".f0.%d\n", inst->flag_subreg);
+ printf(".f0.%d", inst->flag_subreg);
}
}
printf(" ");
case UNIFORM:
printf("***u%d***", inst->dst.reg);
break;
+ case HW_REG:
+ if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
+ switch (inst->dst.fixed_hw_reg.nr) {
+ case BRW_ARF_NULL:
+ printf("null");
+ break;
+ case BRW_ARF_ADDRESS:
+ printf("a0.%d", inst->dst.fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_ACCUMULATOR:
+ printf("acc%d", inst->dst.fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_FLAG:
+ printf("f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ inst->dst.fixed_hw_reg.subnr);
+ break;
+ default:
+ printf("arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ inst->dst.fixed_hw_reg.subnr);
+ break;
+ }
+ } else {
+ printf("hw_reg%d", inst->dst.fixed_hw_reg.nr);
+ }
+ if (inst->dst.fixed_hw_reg.subnr)
+ printf("+%d", inst->dst.fixed_hw_reg.subnr);
+ break;
default:
printf("???");
break;
}
- printf(", ");
+ printf(":%s, ", reg_encoding[inst->dst.type]);
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
if (inst->src[i].negate)
printf("-");
if (inst->src[i].abs)
break;
}
break;
+ case HW_REG:
+ if (inst->src[i].fixed_hw_reg.negate)
+ printf("-");
+ if (inst->src[i].fixed_hw_reg.abs)
+ printf("|");
+ if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
+ switch (inst->src[i].fixed_hw_reg.nr) {
+ case BRW_ARF_NULL:
+ printf("null");
+ break;
+ case BRW_ARF_ADDRESS:
+ printf("a0.%d", inst->src[i].fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_ACCUMULATOR:
+ printf("acc%d", inst->src[i].fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_FLAG:
+ printf("f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ inst->src[i].fixed_hw_reg.subnr);
+ break;
+ default:
+ printf("arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ inst->src[i].fixed_hw_reg.subnr);
+ break;
+ }
+ } else {
+ printf("hw_reg%d", inst->src[i].fixed_hw_reg.nr);
+ }
+ if (inst->src[i].fixed_hw_reg.subnr)
+ printf("+%d", inst->src[i].fixed_hw_reg.subnr);
+ if (inst->src[i].fixed_hw_reg.abs)
+ printf("|");
+ break;
default:
printf("???");
break;
if (inst->src[i].abs)
printf("|");
- if (i < 3)
+ if (inst->src[i].file != IMM) {
+ printf(":%s", reg_encoding[inst->src[i].type]);
+ }
+
+ if (i < 2 && inst->src[i + 1].file != BAD_FILE)
printf(", ");
}
(fp->Base.InputsRead & (1 << VARYING_SLOT_POS)) != 0;
unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
- assert(intel->gen >= 6);
+ assert(brw->gen >= 6);
/* R0-1: masks, pixel X/Y coordinates. */
c->nr_payload_regs = 2;
c->nr_payload_regs++;
}
}
+
+ c->prog_data.uses_pos_offset = c->key.compute_pos_offset;
/* R31: MSAA position offsets. */
+ if (c->prog_data.uses_pos_offset) {
+ c->sample_pos_reg = c->nr_payload_regs;
+ c->nr_payload_regs++;
+ }
+
/* R32-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
}
}
+void
+fs_visitor::assign_binding_table_offsets()
+{
+ uint32_t next_binding_table_offset = 0;
+
+ /* If there are no color regions, we still perform an FB write to a null
+ * renderbuffer, which we place at surface index 0.
+ */
+ c->prog_data.binding_table.render_target_start = next_binding_table_offset;
+ next_binding_table_offset += MAX2(c->key.nr_color_regions, 1);
+
+ assign_common_binding_table_offsets(next_binding_table_offset);
+}
+
bool
fs_visitor::run()
{
sanity_param_count = fp->Base.Parameters->NumParameters;
uint32_t orig_nr_params = c->prog_data.nr_params;
+ bool allocated_without_spills;
- if (intel->gen >= 6)
+ assign_binding_table_offsets();
+
+ if (brw->gen >= 6)
setup_payload_gen6();
else
setup_payload_gen4();
emit_shader_time_begin();
calculate_urb_setup();
- if (intel->gen < 6)
- emit_interpolation_setup_gen4();
- else
- emit_interpolation_setup_gen6();
+ if (fp->Base.InputsRead > 0) {
+ if (brw->gen < 6)
+ emit_interpolation_setup_gen4();
+ else
+ emit_interpolation_setup_gen6();
+ }
/* We handle discards by keeping track of the still-live pixels in f0.1.
* Initialize it with the dispatched pixels.
*/
- if (fp->UsesKill) {
+ if (fp->UsesKill || c->key.alpha_test_func) {
fs_inst *discard_init = emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
discard_init->flag_subreg = 1;
}
emit(FS_OPCODE_PLACEHOLDER_HALT);
+ if (c->key.alpha_test_func)
+ emit_alpha_test();
+
emit_fb_writes();
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
+ remove_dead_constants();
setup_pull_constants();
bool progress;
progress = opt_algebraic() || progress;
progress = opt_cse() || progress;
progress = opt_copy_propagate() || progress;
+ progress = opt_peephole_sel() || progress;
+ progress = opt_peephole_predicated_break() || progress;
progress = dead_code_eliminate() || progress;
progress = dead_code_eliminate_local() || progress;
- progress = register_coalesce() || progress;
- progress = register_coalesce_2() || progress;
+ progress = dead_control_flow_eliminate(this) || progress;
+ progress = register_coalesce() || progress;
progress = compute_to_mrf() || progress;
} while (progress);
- remove_dead_constants();
-
- schedule_instructions(false);
-
lower_uniform_pull_constant_loads();
assign_curb_setup();
assign_urb_setup();
- if (0) {
- /* Debug of register spilling: Go spill everything. */
- for (int i = 0; i < virtual_grf_count; i++) {
- spill_reg(i);
- }
+ static enum instruction_scheduler_mode pre_modes[] = {
+ SCHEDULE_PRE,
+ SCHEDULE_PRE_NON_LIFO,
+ SCHEDULE_PRE_LIFO,
+ };
+
+ /* Try each scheduling heuristic to see if it can successfully register
+ * allocate without spilling. They should be ordered by decreasing
+ * performance but increasing likelihood of allocating.
+ */
+ for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
+ schedule_instructions(pre_modes[i]);
+
+ if (0) {
+ assign_regs_trivial();
+ allocated_without_spills = true;
+ } else {
+ allocated_without_spills = assign_regs(false);
+ }
+ if (allocated_without_spills)
+ break;
}
- if (0)
- assign_regs_trivial();
- else {
- while (!assign_regs()) {
- if (failed)
- break;
- }
+ if (!allocated_without_spills) {
+ /* We assume that any spilling is worse than just dropping back to
+ * SIMD8. There's probably actually some intermediate point where
+ * SIMD16 with a couple of spills is still better.
+ */
+ if (dispatch_width == 16) {
+ fail("Failure to register allocate. Reduce number of "
+ "live scalar values to avoid this.");
+ }
+
+ /* Since we're out of heuristics, just go spill registers until we
+ * get an allocation.
+ */
+ while (!assign_regs(true)) {
+ if (failed)
+ break;
+ }
}
}
assert(force_uncompressed_stack == 0);
- assert(force_sechalf_stack == 0);
/* This must come after all optimization and register allocation, since
* it inserts dead code that happens to have side effects, and it does
if (failed)
return false;
- schedule_instructions(true);
+ if (!allocated_without_spills)
+ schedule_instructions(SCHEDULE_POST);
if (dispatch_width == 8) {
c->prog_data.reg_blocks = brw_register_blocks(grf_used);
struct gl_shader_program *prog,
unsigned *final_assembly_size)
{
- struct intel_context *intel = &brw->intel;
bool start_busy = false;
float start_time = 0;
- if (unlikely(intel->perf_debug)) {
- start_busy = (intel->batch.last_bo &&
- drm_intel_bo_busy(intel->batch.last_bo));
+ if (unlikely(brw->perf_debug)) {
+ start_busy = (brw->batch.last_bo &&
+ drm_intel_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
exec_list *simd16_instructions = NULL;
fs_visitor v2(brw, c, prog, fp, 16);
- bool no16 = INTEL_DEBUG & DEBUG_NO16;
- if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) {
- v2.import_uniforms(&v);
- if (!v2.run()) {
- perf_debug("16-wide shader failed to compile, falling back to "
- "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
+ if (brw->gen >= 5 && likely(!(INTEL_DEBUG & DEBUG_NO16))) {
+ if (c->prog_data.nr_pull_params == 0) {
+ /* Try a 16-wide compile */
+ v2.import_uniforms(&v);
+ if (!v2.run()) {
+ perf_debug("16-wide shader failed to compile, falling back to "
+ "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
+ } else {
+ simd16_instructions = &v2.instructions;
+ }
} else {
- simd16_instructions = &v2.instructions;
+ perf_debug("Skipping 16-wide due to pull parameters.\n");
}
}
- c->prog_data.dispatch_width = 8;
-
fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
const unsigned *generated = g.generate_assembly(&v.instructions,
simd16_instructions,
final_assembly_size);
- if (unlikely(intel->perf_debug) && shader) {
+ if (unlikely(brw->perf_debug) && shader) {
if (shader->compiled_once)
brw_wm_debug_recompile(brw, prog, &c->key);
shader->compiled_once = true;
- if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
+ if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
perf_debug("FS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
struct brw_wm_prog_key key;
if (!prog->_LinkedShaders[MESA_SHADER_FRAGMENT])
memset(&key, 0, sizeof(key));
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
if (fp->UsesKill)
key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
}
- if (intel->gen < 6)
- key.input_slots_valid |= BITFIELD64_BIT(VARYING_SLOT_POS);
-
- for (int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
- continue;
-
- if (intel->gen < 6) {
- if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
- key.input_slots_valid |= BITFIELD64_BIT(i);
- }
- }
+ if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) > 16)
+ key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;
key.clamp_fragment_color = ctx->API == API_OPENGL_COMPAT;
- for (int i = 0; i < MAX_SAMPLERS; i++) {
+ unsigned sampler_count = _mesa_fls(fp->Base.SamplersUsed);
+ for (unsigned i = 0; i < sampler_count; i++) {
if (fp->Base.ShadowSamplers & (1 << i)) {
/* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
key.tex.swizzles[i] =
key.nr_color_regions = 1;
+ /* GL_FRAGMENT_SHADER_DERIVATIVE_HINT is almost always GL_DONT_CARE. The
+ * quality of the derivatives is likely to be determined by the driconf
+ * option.
+ */
+ key.high_quality_derivatives = brw->disable_derivative_optimization;
+
key.program_string_id = bfp->id;
- uint32_t old_prog_offset = brw->wm.prog_offset;
+ uint32_t old_prog_offset = brw->wm.base.prog_offset;
struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;
bool success = do_wm_prog(brw, prog, bfp, &key);
- brw->wm.prog_offset = old_prog_offset;
+ brw->wm.base.prog_offset = old_prog_offset;
brw->wm.prog_data = old_prog_data;
return success;