#include "main/hash_table.h"
#include "main/macros.h"
#include "main/shaderobj.h"
-#include "main/uniforms.h"
#include "main/fbobject.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "brw_wm.h"
}
#include "brw_fs.h"
+#include "main/uniforms.h"
+#include "brw_fs_live_variables.h"
#include "glsl/glsl_types.h"
-#include "glsl/ir_print_visitor.h"
void
fs_inst::init()
ALU2(SHR)
ALU2(ASR)
ALU3(LRP)
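+/* Bitfield operations (ARB_gpu_shader5), fused multiply-add, and
+ * add/subtract with carry/borrow.
+ */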
+ALU1(BFREV)
+ALU3(BFE)
+ALU2(BFI1)
+ALU3(BFI2)
+ALU1(FBH)
+ALU1(FBL)
+ALU1(CBIT)
+ALU3(MAD)
+ALU2(ADDC)
+ALU2(SUBB)
/** Gen4 predicated IF. */
fs_inst *
fs_visitor::IF(uint32_t predicate)

/** Gen6 IF with embedded comparison. */
fs_inst *
fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition)
{
- assert(intel->gen >= 6);
+ assert(brw->gen >= 6);
fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF,
reg_null_d, src0, src1);
inst->conditional_mod = condition;
* mostly work out for float-interpreted-as-int since our comparisons are
* for >0, =0, <0.
*/
- if (intel->gen == 4) {
+ if (brw->gen == 4) {
dst.type = src0.type;
- if (dst.file == FIXED_HW_REG)
+ if (dst.file == HW_REG)
dst.fixed_hw_reg.type = dst.type;
}
varying_offset, const_offset & ~3));
int scale = 1;
- if (intel->gen == 4 && dispatch_width == 8) {
+ if (brw->gen == 4 && dispatch_width == 8) {
/* Pre-gen5, we can either use a SIMD8 message that requires (header,
* u, v, r) as parameters, or we can just use the SIMD16 message
* consisting of (header, u). We choose the second, at the cost of a
}
enum opcode op;
- if (intel->gen >= 7)
+ if (brw->gen >= 7)
op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
else
op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;
inst->regs_written = 4 * scale;
instructions.push_tail(inst);
- if (intel->gen < 7) {
+ if (brw->gen < 7) {
inst->base_mrf = 13;
inst->header_present = true;
- if (intel->gen == 4)
+ if (brw->gen == 4)
inst->mlen = 3;
else
inst->mlen = 1 + dispatch_width / 8;
return (opcode == FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7 ||
opcode == SHADER_OPCODE_SHADER_TIME_ADD ||
(opcode == FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD &&
- src[1].file == GRF));
+ src[1].file == GRF) ||
+ (is_tex() && src[0].file == GRF));
}
bool
fs_visitor::can_do_source_mods(fs_inst *inst)
{
- if (intel->gen == 6 && inst->is_math())
+ if (brw->gen == 6 && inst->is_math())
return false;
if (inst->is_send_from_grf())
return false;
+ if (!inst->can_do_source_mods())
+ return false;
+
return true;
}
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
{
init();
- this->file = FIXED_HW_REG;
+ this->file = HW_REG;
this->fixed_hw_reg = fixed_hw_reg;
this->type = fixed_hw_reg.type;
}
imm.u == r.imm.u);
}
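+/* Returns a copy of this register with the type changed to the given
+ * hardware register type.
+ */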
+fs_reg
+fs_reg::retype(uint32_t type)
+{
+ fs_reg result = *this;
+ result.type = type;
+ return result;
+}
+
bool
fs_reg::is_zero() const
{
return type == BRW_REGISTER_TYPE_F ? imm.f == 0.0 : imm.i == 0;
}

bool
fs_reg::is_one() const
{
return type == BRW_REGISTER_TYPE_F ? imm.f == 1.0 : imm.i == 1;
}
+bool
+fs_reg::is_null() const
+{
+ return file == HW_REG &&
+ fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ fixed_hw_reg.nr == BRW_ARF_NULL;
+}
+
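+/* Three-source instructions (MAD, LRP, BFE, BFI2) can only take GRF and
+ * uniform operands.
+ */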
+bool
+fs_reg::is_valid_3src() const
+{
+ return file == GRF || file == UNIFORM;
+}
+
int
fs_visitor::type_size(const struct glsl_type *type)
{
* link time.
*/
return 0;
+ case GLSL_TYPE_ATOMIC_UINT:
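+ /* Atomic counters take up no uniform storage; they are backed by buffer
+ * objects.
+ */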
+ return 0;
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
fs_reg
fs_visitor::get_timestamp()
{
- assert(intel->gen >= 7);
+ assert(brw->gen >= 7);
fs_reg ts = fs_reg(retype(brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
BRW_ARF_TIMESTAMP,
bool
fs_inst::is_partial_write()
{
- return (this->predicate ||
+ return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
this->force_uncompressed ||
this->force_sechalf);
}
+int
+fs_inst::regs_read(fs_visitor *v, int arg)
+{
+ if (is_tex() && arg == 0 && src[0].file == GRF) {
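+ /* A texture send reads its whole payload from src[0]. mlen counts
+ * hardware registers, and in SIMD16 each virtual GRF element is two
+ * hardware registers wide, so halve it (rounding up) to get virtual
+ * GRF units.
+ */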
+ if (v->dispatch_width == 16)
+ return (mlen + 1) / 2;
+ else
+ return mlen;
+ }
+ return 1;
+}
+
+bool
+fs_inst::reads_flag()
+{
+ return predicate;
+}
+
+bool
+fs_inst::writes_flag()
+{
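+ /* A conditional modifier writes the flag register, except on SEL, where
+ * it only chooses between the two sources.
+ */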
+ return (conditional_mod && opcode != BRW_OPCODE_SEL) ||
+ opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
+}
+
/**
* Returns how many MRFs an FS opcode will write over.
*
if (inst->mlen == 0)
return 0;
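+ /* A base_mrf of -1 means the payload is built in the GRF and sent from
+ * there, so no MRFs are written.
+ */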
+ if (inst->base_mrf == -1)
+ return 0;
+
switch (inst->opcode) {
case SHADER_OPCODE_RCP:
case SHADER_OPCODE_RSQ:
case SHADER_OPCODE_TXD:
case SHADER_OPCODE_TXF:
case SHADER_OPCODE_TXF_MS:
+ case SHADER_OPCODE_TG4:
+ case SHADER_OPCODE_TG4_OFFSET:
case SHADER_OPCODE_TXL:
case SHADER_OPCODE_TXS:
case SHADER_OPCODE_LOD:
case FS_OPCODE_FB_WRITE:
return 2;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
- case FS_OPCODE_UNSPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
return inst->mlen;
- case FS_OPCODE_SPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
return 2;
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ return 0;
default:
assert(!"not reached");
return inst->mlen;
import_uniforms_callback,
variable_ht);
this->params_remap = v->params_remap;
+ this->nr_params_remap = v->nr_params_remap;
}
/* Our support for uniforms is piggy-backed on the struct
wpos.reg_offset++;
/* gl_FragCoord.z */
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
emit(MOV(wpos, fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
} else {
emit(FS_OPCODE_LINTERP, wpos,
bool is_centroid)
{
brw_wm_barycentric_interp_mode barycoord_mode;
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
if (is_centroid) {
if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
int location = ir->location;
for (unsigned int i = 0; i < array_elements; i++) {
for (unsigned int j = 0; j < type->matrix_columns; j++) {
- if (urb_setup[location] == -1) {
+ if (c->prog_data.urb_setup[location] == -1) {
/* If there's no incoming setup data for this slot, don't
* emit interpolation for it.
*/
inst->predicate = BRW_PREDICATE_NORMAL;
inst->predicate_inverse = true;
}
- if (intel->gen < 6) {
+ if (brw->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
}
attr.reg_offset++;
fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
/* The frontfacing comes in as a bit in the thread payload. */
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
emit(BRW_OPCODE_ASR, *reg,
fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
fs_reg(15));
return reg;
}
+void
+fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
+{
+ assert(dst.type == BRW_REGISTER_TYPE_F);
+
+ if (c->key.compute_pos_offset) {
+ /* Convert int_sample_pos to floating point */
+ emit(MOV(dst, int_sample_pos));
+ /* Scale to the range [0, 1] */
+ emit(MUL(dst, dst, fs_reg(1 / 16.0f)));
+ } else {
+ /* From ARB_sample_shading specification:
+ * "When rendering to a non-multisample buffer, or if multisample
+ * rasterization is disabled, gl_SamplePosition will always be
+ * (0.5, 0.5)."
+ */
+ emit(MOV(dst, fs_reg(0.5f)));
+ }
+}
+
+fs_reg *
+fs_visitor::emit_samplepos_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 6);
+ assert(ir->type == glsl_type::vec2_type);
+
+ this->current_annotation = "compute sample position";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+ fs_reg pos = *reg;
+ fs_reg int_sample_x = fs_reg(this, glsl_type::int_type);
+ fs_reg int_sample_y = fs_reg(this, glsl_type::int_type);
+
+ /* WM will be run in MSDISPMODE_PERSAMPLE, so only one of SIMD8 or SIMD16
+ * mode will be enabled.
+ *
+ * From the Ivy Bridge PRM, volume 2 part 1, page 344:
+ * R31.1:0 Position Offset X/Y for Slot[3:0]
+ * R31.3:2 Position Offset X/Y for Slot[7:4]
+ * .....
+ *
+ * The X, Y sample positions come in as bytes in the thread payload, so read
+ * the positions using vstride=16, width=8, hstride=2.
+ */
+ struct brw_reg sample_pos_reg =
+ stride(retype(brw_vec1_grf(c->sample_pos_reg, 0),
+ BRW_REGISTER_TYPE_B), 16, 8, 2);
+
+ emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+ if (dispatch_width == 16) {
+ int_sample_x.sechalf = true;
+ fs_inst *inst = emit(MOV(int_sample_x,
+ fs_reg(suboffset(sample_pos_reg, 16))));
+ inst->force_sechalf = true;
+ int_sample_x.sechalf = false;
+ }
+ /* Compute gl_SamplePosition.x */
+ compute_sample_position(pos, int_sample_x);
+ pos.reg_offset++;
+ emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+ if (dispatch_width == 16) {
+ int_sample_y.sechalf = true;
+ fs_inst *inst = emit(MOV(int_sample_y,
+ fs_reg(suboffset(sample_pos_reg, 17))));
+ inst->force_sechalf = true;
+ int_sample_y.sechalf = false;
+ }
+ /* Compute gl_SamplePosition.y */
+ compute_sample_position(pos, int_sample_y);
+ return reg;
+}
+
fs_reg
fs_visitor::fix_math_operand(fs_reg src)
{
* The hardware ignores source modifiers (negate and abs) on math
* instructions, so we also move to a temp to set those up.
*/
- if (intel->gen == 6 && src.file != UNIFORM && src.file != IMM &&
+ if (brw->gen == 6 && src.file != UNIFORM && src.file != IMM &&
!src.abs && !src.negate)
return src;
/* Gen7 relaxes most of the above restrictions, but still can't use IMM
* operands to math
*/
- if (intel->gen >= 7 && src.file != IMM)
+ if (brw->gen >= 7 && src.file != IMM)
return src;
fs_reg expanded = fs_reg(this, glsl_type::float_type);
* Gen 6 hardware ignores source modifiers (negate and abs) on math
* instructions, so we also move to a temp to set those up.
*/
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
src = fix_math_operand(src);
fs_inst *inst = emit(opcode, dst, src);
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
inst->base_mrf = 2;
inst->mlen = dispatch_width / 8;
}
switch (opcode) {
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
- if (intel->gen >= 7 && dispatch_width == 16)
+ if (brw->gen >= 7 && dispatch_width == 16)
fail("16-wide INTDIV unsupported\n");
break;
case SHADER_OPCODE_POW:
return NULL;
}
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
src0 = fix_math_operand(src0);
src1 = fix_math_operand(src1);
constant_nr / 8,
constant_nr % 8);
- inst->src[i].file = FIXED_HW_REG;
+ inst->src[i].file = HW_REG;
inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
}
}
fs_visitor::calculate_urb_setup()
{
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- urb_setup[i] = -1;
+ c->prog_data.urb_setup[i] = -1;
}
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
- if (intel->gen >= 6) {
- for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
- urb_setup[i] = urb_next++;
- }
+ if (brw->gen >= 6) {
+ if (_mesa_bitcount_64(fp->Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) <= 16) {
+ /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
+ * first 16 varying inputs, so we can put them wherever we want.
+ * Just put them in order.
+ *
+ * This is useful because it means that (a) inputs not used by the
+ * fragment shader won't take up valuable register space, and (b) we
+ * won't have to recompile the fragment shader if it gets paired with
+ * a different vertex (or geometry) shader.
+ */
+ for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
+ if (fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
+ BITFIELD64_BIT(i)) {
+ c->prog_data.urb_setup[i] = urb_next++;
+ }
+ }
+ } else {
+ /* We have enough input varyings that the SF/SBE pipeline stage can't
+ * arbitrarily rearrange them to suit our whim; we have to put them
+ * in an order that matches the output of the previous pipeline stage
+ * (geometry or vertex shader).
+ */
+ struct brw_vue_map prev_stage_vue_map;
+ brw_compute_vue_map(brw, &prev_stage_vue_map,
+ c->key.input_slots_valid);
+ int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
+ assert(prev_stage_vue_map.num_slots <= first_slot + 32);
+ for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
+ slot++) {
+ int varying = prev_stage_vue_map.slot_to_varying[slot];
+ /* Note that varying == BRW_VARYING_SLOT_COUNT when a slot is
+ * unused.
+ */
+ if (varying != BRW_VARYING_SLOT_COUNT &&
+ (fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
+ BITFIELD64_BIT(varying))) {
+ c->prog_data.urb_setup[varying] = slot - first_slot;
+ }
+ }
+ urb_next = prev_stage_vue_map.num_slots - first_slot;
}
} else {
/* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
* incremented, mapped or not.
*/
if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
- urb_setup[i] = urb_next;
+ c->prog_data.urb_setup[i] = urb_next;
urb_next++;
}
}
* See compile_sf_prog() for more info.
*/
if (fp->Base.InputsRead & BITFIELD64_BIT(VARYING_SLOT_PNTC))
- urb_setup[VARYING_SLOT_PNTC] = urb_next++;
+ c->prog_data.urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
- /* Each attribute is 4 setup channels, each of which is half a reg. */
- c->prog_data.urb_read_length = urb_next * 2;
+ c->prog_data.num_varying_inputs = urb_next;
}
void
fs_inst *inst = (fs_inst *)node;
if (inst->opcode == FS_OPCODE_LINTERP) {
- assert(inst->src[2].file == FIXED_HW_REG);
+ assert(inst->src[2].file == HW_REG);
inst->src[2].fixed_hw_reg.nr += urb_start;
}
if (inst->opcode == FS_OPCODE_CINTERP) {
- assert(inst->src[0].file == FIXED_HW_REG);
+ assert(inst->src[0].file == HW_REG);
inst->src[0].fixed_hw_reg.nr += urb_start;
}
}
- this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
+ /* Each attribute is 4 setup channels, each of which is half a reg. */
+ this->first_non_payload_grf =
+ urb_start + c->prog_data.num_varying_inputs * 2;
}
/**
* the send is reading the whole thing.
*/
if (inst->is_send_from_grf()) {
- split_grf[inst->src[0].reg] = false;
+ for (int i = 0; i < 3; i++) {
+ if (inst->src[i].file == GRF) {
+ split_grf[inst->src[i].reg] = false;
+ }
+ }
}
}
}
}
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}
/**
if (remap_table[i] != -1) {
remap_table[i] = new_index;
virtual_grf_sizes[new_index] = virtual_grf_sizes[i];
- if (live_intervals_valid) {
- virtual_grf_use[new_index] = virtual_grf_use[i];
- virtual_grf_def[new_index] = virtual_grf_def[i];
- }
+ invalidate_live_intervals();
++new_index;
}
}
{
if (dispatch_width == 8) {
this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);
+ this->nr_params_remap = c->prog_data.nr_params;
for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
this->params_remap[i] = -1;
if (inst->src[i].file != UNIFORM)
continue;
- assert(constant_nr < (int)c->prog_data.nr_params);
+ /* Section 5.11 of the OpenGL 4.3 spec says:
+ *
+ * "Out-of-bounds reads return undefined values, which include
+ * values from other variables of the active program or zero."
+ */
+ if (constant_nr < 0 || constant_nr >= (int)c->prog_data.nr_params) {
+ constant_nr = 0;
+ }
/* For now, set this to non-negative. We'll give it the
* actual new number in a moment, in order to keep the
if (inst->src[i].file != UNIFORM)
continue;
+ /* As above, redirect out-of-bounds reads to param 0. */
+ if (constant_nr < 0 || constant_nr >= (int)this->nr_params_remap) {
+ constant_nr = 0;
+ }
assert(this->params_remap[constant_nr] != -1);
inst->src[i].reg = this->params_remap[constant_nr];
inst->src[i].reg_offset = 0;
base_ir = inst->ir;
current_annotation = inst->annotation;
- fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg surf_index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg temp = fs_reg(this, glsl_type::float_type);
exec_list list = VARYING_PULL_CONSTANT_LOAD(temp,
surf_index,
assert(!inst->src[i].reladdr);
fs_reg dst = fs_reg(this, glsl_type::float_type);
- fs_reg index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
fs_inst *pull =
new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
break;
}
break;
+ case BRW_OPCODE_OR:
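+ /* x | x == x, so the OR reduces to a plain MOV. */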
+ if (inst->src[0].equals(inst->src[1])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ progress = true;
+ break;
+ }
+ break;
+ case BRW_OPCODE_SEL:
+ if (inst->saturate && inst->src[1].file == IMM) {
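+ /* With saturate, SEL.L x, imm with imm >= 1.0 computes min(x, imm) and
+ * then clamps to [0, 1], which is just sat(x); likewise SEL.GE x, imm
+ * with imm <= 0.0 is sat(x). Either way the SEL becomes a saturating
+ * MOV. The conditional modifier must be cleared, since a MOV with a
+ * cmod would start writing the flag register.
+ */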
+ switch (inst->conditional_mod) {
+ case BRW_CONDITIONAL_LE:
+ case BRW_CONDITIONAL_L:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f >= 1.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->conditional_mod = BRW_CONDITIONAL_NONE;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case BRW_CONDITIONAL_GE:
+ case BRW_CONDITIONAL_G:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f <= 0.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->conditional_mod = BRW_CONDITIONAL_NONE;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
default:
break;
}
}
/**
- * Must be called after calculate_live_intervales() to remove unused
- * writes to registers -- register allocation will fail otherwise
- * because something deffed but not used won't be considered to
- * interfere with other regs.
+ * Removes any instructions writing a VGRF where that VGRF is not used by any
+ * later instruction.
*/
bool
fs_visitor::dead_code_eliminate()
foreach_list_safe(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
- if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
- inst->remove();
- progress = true;
+ if (inst->dst.file == GRF) {
+ bool dead = true;
+
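+ /* The write is dead only if the live range of every register it
+ * writes ends exactly at this instruction, with no later read.
+ */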
+ for (int i = 0; i < inst->regs_written; i++) {
+ int var = live_intervals->var_from_vgrf[inst->dst.reg];
+ assert(live_intervals->end[var + inst->dst.reg_offset + i] >= pc);
+ if (live_intervals->end[var + inst->dst.reg_offset + i] != pc) {
+ dead = false;
+ break;
+ }
+ }
+
+ if (dead) {
+ /* Don't dead code eliminate instructions that write to the
+ * accumulator as a side-effect. Instead just set the destination
+ * to the null register to free it.
+ */
+ switch (inst->opcode) {
+ case BRW_OPCODE_ADDC:
+ case BRW_OPCODE_SUBB:
+ case BRW_OPCODE_MACH:
+ inst->dst = fs_reg(retype(brw_null_reg(), inst->dst.type));
+ break;
+ default:
+ inst->remove();
+ progress = true;
+ break;
+ }
+ }
}
pc++;
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
_mesa_hash_table_destroy(ht, NULL);
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
inst->src[0].smear != -1 ||
inst->dst.file != GRF ||
inst->dst.type != inst->src[0].type ||
- virtual_grf_sizes[inst->src[0].reg] != 1 ||
- virtual_grf_interferes(inst->dst.reg, inst->src[0].reg)) {
+ virtual_grf_sizes[inst->src[0].reg] != 1) {
continue;
}
+ int var_from = live_intervals->var_from_reg(&inst->src[0]);
+ int var_to = live_intervals->var_from_reg(&inst->dst);
+
+ if (live_intervals->vars_interfere(var_from, var_to))
+ continue;
+
int reg_from = inst->src[0].reg;
assert(inst->src[0].reg_offset == 0);
int reg_to = inst->dst.reg;
}
inst->remove();
-
- /* We don't need to recalculate live intervals inside the loop despite
- * flagging live_intervals_valid because we only use live intervals for
- * the interferes test, and we must have had a situation where the
- * intervals were:
- *
- * from to
- * ^
- * |
- * v
- * ^
- * |
- * v
- *
- * Some register R that might get coalesced with one of these two could
- * only be referencing "to", otherwise "from"'s range would have been
- * longer. R's range could also only start at the end of "to" or later,
- * otherwise it will conflict with "to" when we try to coalesce "to"
- * into Rw anyway.
- */
- live_intervals_valid = false;
-
progress = true;
continue;
}
+ if (progress)
+ invalidate_live_intervals();
+
return progress;
}
}
}
+ if (has_source_modifiers) {
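+ /* Source modifiers are interpreted according to the operand type, so
+ * coalescing a MOV whose source carries negate or abs into a use with
+ * a different type would change the result.
+ */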
+ for (int i = 0; i < 3; i++) {
+ if (scan_inst->src[i].file == GRF &&
+ scan_inst->src[i].reg == inst->dst.reg &&
+ scan_inst->src[i].reg_offset == inst->dst.reg_offset &&
+ inst->dst.type != scan_inst->src[i].type)
+ {
+ interfered = true;
+ break;
+ }
+ }
+ }
+
/* The gen6 MATH instruction can't handle source modifiers or
* unusual register regions, so avoid coalescing those for
* now. We should do something more specific.
break;
}
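+ /* A send that pulls its payload straight from the GRF (base_mrf == -1)
+ * reads the whole message starting at src[0]; don't coalesce into its
+ * payload.
+ */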
+ if (scan_inst->mlen > 0 && scan_inst->base_mrf == -1 &&
+ scan_inst->src[0].file == GRF &&
+ scan_inst->src[0].reg == inst->dst.reg) {
+ interfered = true;
+ break;
+ }
+
/* The accumulator result appears to get used for the
* conditional modifier generation. When negating a UD
* value, there is a 33rd bit generated for the sign in the
new_src.abs = 1;
}
new_src.negate ^= scan_inst->src[i].negate;
+ new_src.sechalf = scan_inst->src[i].sechalf;
scan_inst->src[i] = new_src;
}
}
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
/* Can't compute-to-MRF this GRF if someone else was going to
* read it later.
*/
- if (this->virtual_grf_use[inst->src[0].reg] > ip)
+ if (this->virtual_grf_end[inst->src[0].reg] > ip)
continue;
/* Found a move of a GRF to a MRF. Let's see if we can go
if (scan_inst->mlen)
break;
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
/* gen6 math instructions must have the destination be
* GRF, so no compute-to-MRF for them.
*/
}
}
- if (scan_inst->mlen > 0) {
+ if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
/* Found a SEND instruction, which means that there are
* live values in MRFs from base_mrf to base_mrf +
* scan_inst->mlen - 1. Don't go pushing our MRF write up
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
last_mrf_move[inst->dst.reg] = NULL;
}
- if (inst->mlen > 0) {
+ if (inst->mlen > 0 && inst->base_mrf != -1) {
/* Found a SEND instruction, which will include two or fewer
* implied MRF writes. We could do better here.
*/
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
int grf;
if (inst->src[i].file == GRF) {
grf = inst->src[i].reg;
- } else if (inst->src[i].file == FIXED_HW_REG &&
+ } else if (inst->src[i].file == HW_REG &&
inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
grf = inst->src[i].fixed_hw_reg.nr;
} else {
void
fs_visitor::insert_gen4_send_dependency_workarounds()
{
- if (intel->gen != 4 || intel->is_g4x)
+ if (brw->gen != 4 || brw->is_g4x)
return;
/* Note that we're done with register allocation, so GRF fs_regs always
if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
continue;
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
/* The offset arg before was a vec4-aligned byte offset. We need to
* turn it into a dword offset.
*/
inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
inst->src[1] = payload;
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
} else {
/* Before register allocation, we didn't tell the scheduler about the
* MRF we use. We know it's safe to use this MRF because nothing
}
void
-fs_visitor::dump_instruction(fs_inst *inst)
+fs_visitor::dump_instruction(backend_instruction *be_inst)
{
+ fs_inst *inst = (fs_inst *)be_inst;
+
if (inst->predicate) {
printf("(%cf0.%d) ",
inst->predicate_inverse ? '-' : '+',
if (inst->conditional_mod) {
printf(".cmod");
if (!inst->predicate &&
- (intel->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
+ (brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
- printf(".f0.%d\n", inst->flag_subreg);
+ printf(".f0.%d", inst->flag_subreg);
}
}
printf(" ");
case UNIFORM:
printf("***u%d***", inst->dst.reg);
break;
+ case HW_REG:
+ printf("hw_reg%d", inst->dst.fixed_hw_reg.nr);
+ if (inst->dst.fixed_hw_reg.subnr)
+ printf("+%d", inst->dst.fixed_hw_reg.subnr);
+ break;
default:
printf("???");
break;
break;
}
break;
+ case HW_REG:
+ if (inst->src[i].fixed_hw_reg.negate)
+ printf("-");
+ if (inst->src[i].fixed_hw_reg.abs)
+ printf("|");
+ printf("hw_reg%d", inst->src[i].fixed_hw_reg.nr);
+ if (inst->src[i].fixed_hw_reg.subnr)
+ printf("+%d", inst->src[i].fixed_hw_reg.subnr);
+ if (inst->src[i].fixed_hw_reg.abs)
+ printf("|");
+ break;
default:
printf("???");
break;
printf("\n");
}
-void
-fs_visitor::dump_instructions()
-{
- int ip = 0;
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
- printf("%d: ", ip++);
- dump_instruction(inst);
- }
-}
-
/**
* Possibly returns an instruction that set up @param reg.
*
(fp->Base.InputsRead & (1 << VARYING_SLOT_POS)) != 0;
unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
- assert(intel->gen >= 6);
+ assert(brw->gen >= 6);
/* R0-1: masks, pixel X/Y coordinates. */
c->nr_payload_regs = 2;
c->nr_payload_regs++;
}
}
+
+ c->prog_data.uses_pos_offset = c->key.compute_pos_offset;
/* R31: MSAA position offsets. */
+ if (c->prog_data.uses_pos_offset) {
+ c->sample_pos_reg = c->nr_payload_regs;
+ c->nr_payload_regs++;
+ }
+
/* R32-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
}
}
+void
+fs_visitor::assign_binding_table_offsets()
+{
+ uint32_t next_binding_table_offset = 0;
+
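+ /* Put the render targets first, then lay out the entries shared across
+ * shader stages (textures, pull constants, and so on).
+ */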
+ c->prog_data.binding_table.render_target_start = next_binding_table_offset;
+ next_binding_table_offset += c->key.nr_color_regions;
+
+ assign_common_binding_table_offsets(next_binding_table_offset);
+}
+
bool
fs_visitor::run()
{
sanity_param_count = fp->Base.Parameters->NumParameters;
uint32_t orig_nr_params = c->prog_data.nr_params;
- if (intel->gen >= 6)
+ assign_binding_table_offsets();
+
+ if (brw->gen >= 6)
setup_payload_gen6();
else
setup_payload_gen4();
emit_shader_time_begin();
calculate_urb_setup();
- if (intel->gen < 6)
- emit_interpolation_setup_gen4();
- else
- emit_interpolation_setup_gen6();
+ if (fp->Base.InputsRead > 0) {
+ if (brw->gen < 6)
+ emit_interpolation_setup_gen4();
+ else
+ emit_interpolation_setup_gen6();
+ }
/* We handle discards by keeping track of the still-live pixels in f0.1.
* Initialize it with the dispatched pixels.
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
+ remove_dead_constants();
setup_pull_constants();
bool progress;
progress = compute_to_mrf() || progress;
} while (progress);
- remove_dead_constants();
-
schedule_instructions(false);
lower_uniform_pull_constant_loads();
assign_curb_setup();
assign_urb_setup();
- if (0) {
- /* Debug of register spilling: Go spill everything. */
- for (int i = 0; i < virtual_grf_count; i++) {
- spill_reg(i);
- }
- }
-
if (0)
assign_regs_trivial();
else {
struct gl_shader_program *prog,
unsigned *final_assembly_size)
{
- struct intel_context *intel = &brw->intel;
bool start_busy = false;
float start_time = 0;
- if (unlikely(intel->perf_debug)) {
- start_busy = (intel->batch.last_bo &&
- drm_intel_bo_busy(intel->batch.last_bo));
+ if (unlikely(brw->perf_debug)) {
+ start_busy = (brw->batch.last_bo &&
+ drm_intel_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
exec_list *simd16_instructions = NULL;
fs_visitor v2(brw, c, prog, fp, 16);
- bool no16 = INTEL_DEBUG & DEBUG_NO16;
- if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) {
- v2.import_uniforms(&v);
- if (!v2.run()) {
- perf_debug("16-wide shader failed to compile, falling back to "
- "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
+ if (brw->gen >= 5 && likely(!(INTEL_DEBUG & DEBUG_NO16))) {
+ if (c->prog_data.nr_pull_params == 0) {
+ /* Try a 16-wide compile */
+ v2.import_uniforms(&v);
+ if (!v2.run()) {
+ perf_debug("16-wide shader failed to compile, falling back to "
+ "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
+ } else {
+ simd16_instructions = &v2.instructions;
+ }
} else {
- simd16_instructions = &v2.instructions;
+ perf_debug("Skipping 16-wide due to pull parameters.\n");
}
}
- c->prog_data.dispatch_width = 8;
-
fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
const unsigned *generated = g.generate_assembly(&v.instructions,
simd16_instructions,
final_assembly_size);
- if (unlikely(intel->perf_debug) && shader) {
+ if (unlikely(brw->perf_debug) && shader) {
if (shader->compiled_once)
brw_wm_debug_recompile(brw, prog, &c->key);
shader->compiled_once = true;
- if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
+ if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
perf_debug("FS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
struct brw_wm_prog_key key;
if (!prog->_LinkedShaders[MESA_SHADER_FRAGMENT])
memset(&key, 0, sizeof(key));
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
if (fp->UsesKill)
key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
}
- if (intel->gen < 6)
- key.input_slots_valid |= BITFIELD64_BIT(VARYING_SLOT_POS);
-
- for (int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
- continue;
-
- if (intel->gen < 6) {
- if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
- key.input_slots_valid |= BITFIELD64_BIT(i);
- }
- }
+ if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) > 16)
+ key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;
key.clamp_fragment_color = ctx->API == API_OPENGL_COMPAT;
- for (int i = 0; i < MAX_SAMPLERS; i++) {
+ unsigned sampler_count = _mesa_fls(fp->Base.SamplersUsed);
+ for (unsigned i = 0; i < sampler_count; i++) {
if (fp->Base.ShadowSamplers & (1 << i)) {
/* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
key.tex.swizzles[i] =
key.nr_color_regions = 1;
+ /* GL_FRAGMENT_SHADER_DERIVATIVE_HINT is almost always GL_DONT_CARE. The
+ * quality of the derivatives is likely to be determined by the driconf
+ * option.
+ */
+ key.high_quality_derivatives = brw->disable_derivative_optimization;
+
key.program_string_id = bfp->id;
- uint32_t old_prog_offset = brw->wm.prog_offset;
+ uint32_t old_prog_offset = brw->wm.base.prog_offset;
struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;
bool success = do_wm_prog(brw, prog, bfp, &key);
- brw->wm.prog_offset = old_prog_offset;
+ brw->wm.base.prog_offset = old_prog_offset;
brw->wm.prog_data = old_prog_data;
return success;