#include "brw_wm.h"
}
#include "brw_fs.h"
+#include "brw_dead_control_flow.h"
#include "main/uniforms.h"
+#include "brw_fs_live_variables.h"
#include "glsl/glsl_types.h"
void
fs_inst::init()
{
memset(this, 0, sizeof(*this));
- this->opcode = BRW_OPCODE_NOP;
this->conditional_mod = BRW_CONDITIONAL_NONE;
this->dst = reg_undef;
fs_inst::fs_inst()
{
init();
+ this->opcode = BRW_OPCODE_NOP;
}
fs_inst::fs_inst(enum opcode opcode)
ALU3(MAD)
ALU2(ADDC)
ALU2(SUBB)
+ALU2(SEL)
/** Gen4 predicated IF. */
fs_inst *
return inst;
}
-/** Gen6+ IF with embedded comparison. */
+/** Gen6 IF with embedded comparison. */
fs_inst *
fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition)
{
- assert(brw->gen >= 6);
+ assert(brw->gen == 6);
fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF,
reg_null_d, src0, src1);
inst->conditional_mod = condition;
}
exec_list
-fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index,
- fs_reg varying_offset,
+fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_reg &dst,
+ const fs_reg &surf_index,
+ const fs_reg &varying_offset,
uint32_t const_offset)
{
exec_list instructions;
return (opcode == FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7 ||
opcode == SHADER_OPCODE_SHADER_TIME_ADD ||
(opcode == FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD &&
- src[1].file == GRF));
+ src[1].file == GRF) ||
+ (is_tex() && src[0].file == GRF));
}
bool
if (inst->is_send_from_grf())
return false;
+ if (!inst->can_do_source_mods())
+ return false;
+
return true;
}
fs_reg::init()
{
memset(this, 0, sizeof(*this));
- this->smear = -1;
+ stride = 1;
}
/** Generic unset register constructor. */
this->imm.u = u;
}
-/** Fixed brw_reg Immediate value constructor. */
+/** Fixed brw_reg. */
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
{
init();
return (file == r.file &&
reg == r.reg &&
reg_offset == r.reg_offset &&
+ subreg_offset == r.subreg_offset &&
type == r.type &&
negate == r.negate &&
abs == r.abs &&
!reladdr && !r.reladdr &&
memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
sizeof(fixed_hw_reg)) == 0 &&
- smear == r.smear &&
+ stride == r.stride &&
imm.u == r.imm.u);
}
+fs_reg &
+fs_reg::apply_stride(unsigned stride)
+{
+ assert((this->stride * stride) <= 4 &&
+ (is_power_of_two(stride) || stride == 0) &&
+ file != HW_REG && file != IMM);
+ this->stride *= stride;
+ return *this;
+}
+
+fs_reg &
+fs_reg::set_smear(unsigned subreg)
+{
+ assert(file != HW_REG && file != IMM);
+ subreg_offset = subreg * type_sz(type);
+ stride = 0;
+ return *this;
+}
+
+bool
+fs_reg::is_contiguous() const
+{
+ return stride == 1;
+}
+
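Illustrative only, not part of the patch: a stripped-down model of the new region fields (the toy_reg struct and the hard-coded 4-byte float size are assumptions for the sketch). set_smear() replaces the old integer smear field by pointing the region at a single component with stride 0, apply_stride() scales the horizontal stride, and is_contiguous() reports whether the register still reads consecutive channels.

#include <cassert>

struct toy_reg {                 /* stand-in for fs_reg; only region fields */
   unsigned subreg_offset = 0;   /* byte offset into the register */
   unsigned stride = 1;          /* 0 = replicated scalar, 1 = contiguous */

   toy_reg &apply_stride(unsigned s) { stride *= s; return *this; }
   toy_reg &set_smear(unsigned subreg) {
      subreg_offset = subreg * 4;   /* 4 == type_sz(BRW_REGISTER_TYPE_F) */
      stride = 0;
      return *this;
   }
   bool is_contiguous() const { return stride == 1; }
};

int main() {
   toy_reg r;
   r.set_smear(2);               /* what "dst.smear = 2" used to express */
   assert(r.subreg_offset == 8 && !r.is_contiguous());

   toy_reg s;
   s.apply_stride(2);            /* read every other channel */
   assert(!s.is_contiguous());
   return 0;
}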
bool
fs_reg::is_zero() const
{
return type == BRW_REGISTER_TYPE_F ? imm.f == 1.0 : imm.i == 1;
}
+bool
+fs_reg::is_null() const
+{
+ return file == HW_REG &&
+ fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ fixed_hw_reg.nr == BRW_ARF_NULL;
+}
+
bool
fs_reg::is_valid_3src() const
{
* link time.
*/
return 0;
+ case GLSL_TYPE_ATOMIC_UINT:
+ return 0;
+ case GLSL_TYPE_IMAGE:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
* else that might disrupt timing) by setting smear to 2 and checking if
* that field is != 0.
*/
- dst.smear = 0;
+ dst.set_smear(0);
return dst;
}
* were the only two timestamp reads that happened).
*/
fs_reg reset = shader_end_time;
- reset.smear = 2;
+ reset.set_smear(2);
fs_inst *test = emit(AND(reg_null_d, reset, fs_reg(1u)));
test->conditional_mod = BRW_CONDITIONAL_Z;
emit(IF(BRW_PREDICATE_NORMAL));
else
payload = fs_reg(this, glsl_type::uint_type);
- emit(fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
- fs_reg(), payload, offset, value));
+ emit(new(mem_ctx) fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
+ fs_reg(), payload, offset, value));
}
void
fs_inst *
fs_visitor::emit(enum opcode opcode)
{
- return emit(fs_inst(opcode));
+ return emit(new(mem_ctx) fs_inst(opcode));
}
fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst)
{
- return emit(fs_inst(opcode, dst));
+ return emit(new(mem_ctx) fs_inst(opcode, dst));
}
fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0)
{
- return emit(fs_inst(opcode, dst, src0));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0));
}
fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
- return emit(fs_inst(opcode, dst, src0, src1));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1));
}
fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst,
fs_reg src0, fs_reg src1, fs_reg src2)
{
- return emit(fs_inst(opcode, dst, src0, src1, src2));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1, src2));
}
void
assert(force_uncompressed_stack >= 0);
}
-void
-fs_visitor::push_force_sechalf()
-{
- force_sechalf_stack++;
-}
-
-void
-fs_visitor::pop_force_sechalf()
-{
- force_sechalf_stack--;
- assert(force_sechalf_stack >= 0);
-}
-
/**
* Returns true if the instruction has a flag that means it won't
* update an entire destination register.
{
return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
this->force_uncompressed ||
- this->force_sechalf);
+ this->force_sechalf || !this->dst.is_contiguous());
+}
+
+int
+fs_inst::regs_read(fs_visitor *v, int arg)
+{
+ if (is_tex() && arg == 0 && src[0].file == GRF) {
+ if (v->dispatch_width == 16)
+ return (mlen + 1) / 2;
+ else
+ return mlen;
+ }
+ return 1;
+}
+
+bool
+fs_inst::reads_flag()
+{
+ return predicate;
+}
+
+bool
+fs_inst::writes_flag()
+{
+ return (conditional_mod && opcode != BRW_OPCODE_SEL) ||
+ opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
}
/**
if (inst->mlen == 0)
return 0;
+ if (inst->base_mrf == -1)
+ return 0;
+
switch (inst->opcode) {
case SHADER_OPCODE_RCP:
case SHADER_OPCODE_RSQ:
case FS_OPCODE_TXB:
case SHADER_OPCODE_TXD:
case SHADER_OPCODE_TXF:
- case SHADER_OPCODE_TXF_MS:
+ case SHADER_OPCODE_TXF_CMS:
+ case SHADER_OPCODE_TXF_MCS:
case SHADER_OPCODE_TG4:
+ case SHADER_OPCODE_TG4_OFFSET:
case SHADER_OPCODE_TXL:
case SHADER_OPCODE_TXS:
case SHADER_OPCODE_LOD:
case FS_OPCODE_FB_WRITE:
return 2;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
- case FS_OPCODE_UNSPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
return inst->mlen;
- case FS_OPCODE_SPILL:
+ case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
return 2;
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ return 0;
default:
assert(!"not reached");
return inst->mlen;
hash_table_insert(dst_ht, data, key);
}
-/* For 16-wide, we need to follow from the uniform setup of 8-wide dispatch.
+/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
* This brings in those uniform definitions
*/
void
* order we'd walk the type, so walk the list of storage and find anything
* with our name, or the prefix of a component that starts with our name.
*/
- unsigned params_before = c->prog_data.nr_params;
+ unsigned params_before = uniforms;
for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
slots *= storage->array_elements;
for (unsigned i = 0; i < slots; i++) {
- c->prog_data.param[c->prog_data.nr_params++] =
- &storage->storage[i].f;
+ stage_prog_data->param[uniforms++] = &storage->storage[i].f;
}
}
/* Make sure we actually initialized the right amount of stuff here. */
- assert(params_before + ir->type->component_slots() ==
- c->prog_data.nr_params);
+ assert(params_before + ir->type->component_slots() == uniforms);
(void)params_before;
}
break;
last_swiz = swiz;
- c->prog_data.param[c->prog_data.nr_params++] =
+ stage_prog_data->param[uniforms++] =
&fp->Base.Parameters->ParameterValues[index][swiz].f;
}
}
{
fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
fs_reg wpos = *reg;
- bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;
+ bool flip = !ir->data.origin_upper_left ^ c->key.render_to_fbo;
/* gl_FragCoord.x */
- if (ir->pixel_center_integer) {
+ if (ir->data.pixel_center_integer) {
emit(MOV(wpos, this->pixel_x));
} else {
emit(ADD(wpos, this->pixel_x, fs_reg(0.5f)));
wpos.reg_offset++;
/* gl_FragCoord.y */
- if (!flip && ir->pixel_center_integer) {
+ if (!flip && ir->data.pixel_center_integer) {
emit(MOV(wpos, this->pixel_y));
} else {
fs_reg pixel_y = this->pixel_y;
- float offset = (ir->pixel_center_integer ? 0.0 : 0.5);
+ float offset = (ir->data.pixel_center_integer ? 0.0 : 0.5);
if (flip) {
pixel_y.negate = true;
fs_inst *
fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
glsl_interp_qualifier interpolation_mode,
- bool is_centroid)
+ bool is_centroid, bool is_sample)
{
brw_wm_barycentric_interp_mode barycoord_mode;
if (brw->gen >= 6) {
barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
else
barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
+ } else if (is_sample) {
+ if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
+ barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
+ else
+ barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
} else {
if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
glsl_interp_qualifier interpolation_mode =
ir->determine_interpolation_mode(c->key.flat_shade);
- int location = ir->location;
+ int location = ir->data.location;
for (unsigned int i = 0; i < array_elements; i++) {
for (unsigned int j = 0; j < type->matrix_columns; j++) {
if (c->prog_data.urb_setup[location] == -1) {
} else {
/* Smooth/noperspective interpolation case. */
for (unsigned int k = 0; k < type->vector_elements; k++) {
- /* FINISHME: At some point we probably want to push
- * this farther by giving similar treatment to the
- * other potentially constant components of the
- * attribute, as well as making brw_vs_constval.c
- * handle varyings other than gl_TexCoord.
- */
struct brw_reg interp = interp_reg(location, k);
emit_linterp(attr, fs_reg(interp), interpolation_mode,
- ir->centroid);
- if (brw->needs_unlit_centroid_workaround && ir->centroid) {
+ ir->data.centroid && !c->key.persample_shading,
+ ir->data.sample || c->key.persample_shading);
+ if (brw->needs_unlit_centroid_workaround && ir->data.centroid) {
/* Get the pixel/sample mask into f0 so that we know
* which pixels are lit. Then, for each channel that is
* unlit, replace the centroid data with non-centroid
*/
emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
fs_inst *inst = emit_linterp(attr, fs_reg(interp),
- interpolation_mode, false);
+ interpolation_mode,
+ false, false);
inst->predicate = BRW_PREDICATE_NORMAL;
inst->predicate_inverse = true;
}
return reg;
}
+void
+fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
+{
+ assert(dst.type == BRW_REGISTER_TYPE_F);
+
+ if (c->key.compute_pos_offset) {
+ /* Convert int_sample_pos to floating point */
+ emit(MOV(dst, int_sample_pos));
+ /* Scale to the range [0, 1] */
+ emit(MUL(dst, dst, fs_reg(1 / 16.0f)));
+ }
+ else {
+ /* From ARB_sample_shading specification:
+ * "When rendering to a non-multisample buffer, or if multisample
+ * rasterization is disabled, gl_SamplePosition will always be
+ * (0.5, 0.5)."
+ */
+ emit(MOV(dst, fs_reg(0.5f)));
+ }
+}
+
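A quick numeric check of the 1/16 scaling above (illustrative only, not part of the patch; the payload value 8 is a made-up example). The payload bytes appear to carry the sample offset in sixteenths of a pixel, so the multiply maps them into [0, 1):

#include <cstdio>

int main() {
   int int_sample_pos = 8;                    /* hypothetical payload byte */
   float pos = int_sample_pos * (1 / 16.0f);  /* same scale as the MUL above */
   printf("gl_SamplePosition component = %f\n", pos);   /* prints 0.500000 */
   return 0;
}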
+fs_reg *
+fs_visitor::emit_samplepos_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 6);
+ assert(ir->type == glsl_type::vec2_type);
+
+ this->current_annotation = "compute sample position";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+ fs_reg pos = *reg;
+ fs_reg int_sample_x = fs_reg(this, glsl_type::int_type);
+ fs_reg int_sample_y = fs_reg(this, glsl_type::int_type);
+
+ /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
+ * mode will be enabled.
+ *
+ * From the Ivy Bridge PRM, volume 2 part 1, page 344:
+ * R31.1:0 Position Offset X/Y for Slot[3:0]
+ * R31.3:2 Position Offset X/Y for Slot[7:4]
+ * .....
+ *
+ * The X, Y sample positions come in as bytes in thread payload. So, read
+ * the positions using vstride=16, width=8, hstride=2.
+ */
+ struct brw_reg sample_pos_reg =
+ stride(retype(brw_vec1_grf(c->sample_pos_reg, 0),
+ BRW_REGISTER_TYPE_B), 16, 8, 2);
+
+ emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+ if (dispatch_width == 16) {
+ fs_inst *inst = emit(MOV(half(int_sample_x, 1),
+ fs_reg(suboffset(sample_pos_reg, 16))));
+ inst->force_sechalf = true;
+ }
+ /* Compute gl_SamplePosition.x */
+ compute_sample_position(pos, int_sample_x);
+ pos.reg_offset++;
+ emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+ if (dispatch_width == 16) {
+ fs_inst *inst = emit(MOV(half(int_sample_y, 1),
+ fs_reg(suboffset(sample_pos_reg, 17))));
+ inst->force_sechalf = true;
+ }
+ /* Compute gl_SamplePosition.y */
+ compute_sample_position(pos, int_sample_y);
+ return reg;
+}
+
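The <16;8,2>:B region described in the comment above can be checked with plain index arithmetic (illustrative only, not part of the patch): element i of row r sits at byte r*vstride + i*hstride, so the region walks the interleaved X bytes 0, 2, ..., 30, with each slot's Y byte immediately after it.

#include <cstdio>

int main() {
   const int vstride = 16, width = 8, hstride = 2;  /* region <16;8,2>:B */
   for (int chan = 0; chan < 16; chan++) {
      int row = chan / width, elem = chan % width;
      int x_byte = row * vstride + elem * hstride;
      printf("channel %2d: X at payload byte %2d, Y at byte %2d\n",
             chan, x_byte, x_byte + 1);
   }
   return 0;
}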
+fs_reg *
+fs_visitor::emit_sampleid_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 6);
+
+ this->current_annotation = "compute sample id";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+
+ if (c->key.compute_sample_id) {
+ fs_reg t1 = fs_reg(this, glsl_type::int_type);
+ fs_reg t2 = fs_reg(this, glsl_type::int_type);
+ t2.type = BRW_REGISTER_TYPE_UW;
+
+ /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
+ * 8x multisampling, subspan 0 will represent sample N (where N
+ * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
+ * 7. We can find the value of N by looking at R0.0 bits 7:6
+ * ("Starting Sample Pair Index (SSPI)") and multiplying by two
+ * (since samples are always delivered in pairs). That is, we
+ * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
+ * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
+ * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
+ * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
+ * populating a temporary variable with the sequence (0, 1, 2, 3),
+ * and then reading from it using vstride=1, width=4, hstride=0.
+ * These computations hold good for 4x multisampling as well.
+ */
+ emit(BRW_OPCODE_AND, t1,
+ fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
+ fs_reg(brw_imm_d(0xc0)));
+ emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
+ /* This works for both SIMD8 and SIMD16 */
+ emit(MOV(t2, brw_imm_v(0x3210)));
+ /* This special instruction takes care of setting vstride=1,
+ * width=4, hstride=0 of t2 during an ADD instruction.
+ */
+ emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
+ } else {
+ /* As per GL_ARB_sample_shading specification:
+ * "When rendering to a non-multisample buffer, or if multisample
+ * rasterization is disabled, gl_SampleID will always be zero."
+ */
+ emit(BRW_OPCODE_MOV, *reg, fs_reg(0));
+ }
+
+ return reg;
+}
+
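The bit twiddling in the comment above can be sanity-checked in scalar code (illustrative only, not part of the patch; the r0_0 value is a made-up payload word). t1 recovers 2*SSPI, and reading the 0x3210 vector with a <1;4,0> region replicates each element across four channels, giving the (0, 0, 0, 0, 1, 1, 1, 1, ...) sequence that is added per channel:

#include <cstdio>

int main() {
   unsigned r0_0 = 0x80;                 /* hypothetical R0.0: SSPI = 2 */
   unsigned t1 = (r0_0 & 0xc0) >> 5;     /* 2 * SSPI == 4 */
   unsigned t2[4] = {0, 1, 2, 3};        /* the brw_imm_v(0x3210) vector */

   for (int chan = 0; chan < 8; chan++)  /* SIMD8; SIMD16 continues into 2, 3 */
      printf("channel %d -> gl_SampleID %u\n", chan, t1 + t2[chan / 4]);
   return 0;
}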
+fs_reg *
+fs_visitor::emit_samplemaskin_setup(ir_variable *ir)
+{
+ assert(brw->gen >= 7);
+ this->current_annotation = "compute gl_SampleMaskIn";
+ fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
+ emit(MOV(*reg, fs_reg(retype(brw_vec8_grf(c->sample_mask_reg, 0), BRW_REGISTER_TYPE_D))));
+ return reg;
+}
+
fs_reg
fs_visitor::fix_math_operand(fs_reg src)
{
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
if (brw->gen >= 7 && dispatch_width == 16)
- fail("16-wide INTDIV unsupported\n");
+ fail("SIMD16 INTDIV unsupported\n");
break;
case SHADER_OPCODE_POW:
break;
void
fs_visitor::assign_curb_setup()
{
- c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
if (dispatch_width == 8) {
c->prog_data.first_curbe_grf = c->nr_payload_regs;
+ stage_prog_data->nr_params = uniforms;
} else {
c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
+ /* Make sure we didn't try to sneak in an extra uniform */
+ assert(uniforms == 0);
}
+ c->prog_data.curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;
+
/* Map the offsets in the UNIFORM file to fixed HW regs. */
foreach_list(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
constant_nr % 8);
inst->src[i].file = HW_REG;
- inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
+ inst->src[i].fixed_hw_reg = byte_offset(
+ retype(brw_reg, inst->src[i].type),
+ inst->src[i].subreg_offset);
}
}
}
}
}
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}
/**
if (remap_table[i] != -1) {
remap_table[i] = new_index;
virtual_grf_sizes[new_index] = virtual_grf_sizes[i];
- if (live_intervals_valid) {
- virtual_grf_start[new_index] = virtual_grf_start[i];
- virtual_grf_end[new_index] = virtual_grf_end[i];
- }
+ invalidate_live_intervals();
++new_index;
}
}
fs_visitor::remove_dead_constants()
{
if (dispatch_width == 8) {
- this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);
- this->nr_params_remap = c->prog_data.nr_params;
+ this->params_remap = ralloc_array(mem_ctx, int, uniforms);
+ this->nr_params_remap = uniforms;
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
+ for (unsigned int i = 0; i < uniforms; i++)
this->params_remap[i] = -1;
/* Find which params are still in use. */
* "Out-of-bounds reads return undefined values, which include
* values from other variables of the active program or zero."
*/
- if (constant_nr < 0 || constant_nr >= (int)c->prog_data.nr_params) {
+ if (constant_nr < 0 || constant_nr >= (int)uniforms) {
constant_nr = 0;
}
* now we don't care.
*/
unsigned int new_nr_params = 0;
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
+ for (unsigned int i = 0; i < uniforms; i++) {
if (this->params_remap[i] != -1) {
this->params_remap[i] = new_nr_params++;
}
}
/* Update the list of params to be uploaded to match our new numbering. */
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
+ for (unsigned int i = 0; i < uniforms; i++) {
int remapped = this->params_remap[i];
if (remapped == -1)
continue;
- c->prog_data.param[remapped] = c->prog_data.param[i];
+ stage_prog_data->param[remapped] = stage_prog_data->param[i];
}
- c->prog_data.nr_params = new_nr_params;
+ uniforms = new_nr_params;
} else {
- /* This should have been generated in the 8-wide pass already. */
+ /* This should have been generated in the SIMD8 pass already. */
assert(this->params_remap);
}
void
fs_visitor::move_uniform_array_access_to_pull_constants()
{
- int pull_constant_loc[c->prog_data.nr_params];
+ pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
+ for (unsigned int i = 0; i < uniforms; i++) {
pull_constant_loc[i] = -1;
}
* add it.
*/
if (pull_constant_loc[uniform] == -1) {
- const float **values = &c->prog_data.param[uniform];
-
- pull_constant_loc[uniform] = c->prog_data.nr_pull_params;
+ const float **values = &stage_prog_data->param[uniform];
assert(param_size[uniform]);
for (int j = 0; j < param_size[uniform]; j++) {
- c->prog_data.pull_param[c->prog_data.nr_pull_params++] =
+ pull_constant_loc[uniform + j] = stage_prog_data->nr_pull_params;
+
+ stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
values[j];
}
}
-
- /* Set up the annotation tracking for new generated instructions. */
- base_ir = inst->ir;
- current_annotation = inst->annotation;
-
- fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
- fs_reg temp = fs_reg(this, glsl_type::float_type);
- exec_list list = VARYING_PULL_CONSTANT_LOAD(temp,
- surf_index,
- *inst->src[i].reladdr,
- pull_constant_loc[uniform] +
- inst->src[i].reg_offset);
- inst->insert_before(&list);
-
- inst->src[i].file = temp.file;
- inst->src[i].reg = temp.reg;
- inst->src[i].reg_offset = temp.reg_offset;
- inst->src[i].reladdr = NULL;
}
}
+ demote_pull_constants(true);
+
+ ralloc_free(pull_constant_loc);
+ pull_constant_loc = NULL;
}
/**
{
/* Only allow 16 registers (128 uniform components) as push constants. */
unsigned int max_uniform_components = 16 * 8;
- if (c->prog_data.nr_params <= max_uniform_components)
- return;
-
- if (dispatch_width == 16) {
- fail("Pull constants not supported in 16-wide\n");
+ if (uniforms <= max_uniform_components)
return;
- }
/* Just demote the end of the list. We could probably do better
* here, demoting things that are rarely used in the program first.
*/
unsigned int pull_uniform_base = max_uniform_components;
- int pull_constant_loc[c->prog_data.nr_params];
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
+ pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+ for (unsigned int i = 0; i < uniforms; i++) {
if (i < pull_uniform_base) {
pull_constant_loc[i] = -1;
} else {
/* If our constant is already being uploaded for reladdr purposes,
* reuse it.
*/
- for (unsigned int j = 0; j < c->prog_data.nr_pull_params; j++) {
- if (c->prog_data.pull_param[j] == c->prog_data.param[i]) {
+ for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j++) {
+ if (stage_prog_data->pull_param[j] == stage_prog_data->param[i]) {
pull_constant_loc[i] = j;
break;
}
}
if (pull_constant_loc[i] == -1) {
- int pull_index = c->prog_data.nr_pull_params++;
- c->prog_data.pull_param[pull_index] = c->prog_data.param[i];
- pull_constant_loc[i] = pull_index;;
+ int pull_index = stage_prog_data->nr_pull_params++;
+ stage_prog_data->pull_param[pull_index] = stage_prog_data->param[i];
+ pull_constant_loc[i] = pull_index;
}
}
}
- c->prog_data.nr_params = pull_uniform_base;
+ uniforms = pull_uniform_base;
+ demote_pull_constants(false);
+}
+
+/**
+ * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
+ * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
+ */
+void
+fs_visitor::demote_pull_constants(bool reladdr_only)
+{
foreach_list(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
if (pull_index == -1)
continue;
- assert(!inst->src[i].reladdr);
+ /* Set up the annotation tracking for new generated instructions. */
+ base_ir = inst->ir;
+ current_annotation = inst->annotation;
- fs_reg dst = fs_reg(this, glsl_type::float_type);
- fs_reg index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
- fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
- fs_inst *pull =
- new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
- dst, index, offset);
- pull->ir = inst->ir;
- pull->annotation = inst->annotation;
+ fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
+ fs_reg dst = fs_reg(this, glsl_type::float_type);
- inst->insert_before(pull);
+ if (reladdr_only != (inst->src[i].reladdr != NULL))
+ continue;
- inst->src[i].file = GRF;
- inst->src[i].reg = dst.reg;
- inst->src[i].reg_offset = 0;
- inst->src[i].smear = pull_index & 3;
+ /* Generate a pull load into dst. */
+ if (inst->src[i].reladdr) {
+ exec_list list = VARYING_PULL_CONSTANT_LOAD(dst,
+ surf_index,
+ *inst->src[i].reladdr,
+ pull_index);
+ inst->insert_before(&list);
+ inst->src[i].reladdr = NULL;
+ } else {
+ fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
+ fs_inst *pull =
+ new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ dst, surf_index, offset);
+ inst->insert_before(pull);
+ inst->src[i].set_smear(pull_index & 3);
+ }
+
+ /* Rewrite the instruction to use the temporary VGRF. */
+ inst->src[i].file = GRF;
+ inst->src[i].reg = dst.reg;
+ inst->src[i].reg_offset = 0;
}
}
+ invalidate_live_intervals();
}
bool
break;
}
break;
+ case BRW_OPCODE_OR:
+ if (inst->src[0].equals(inst->src[1])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ progress = true;
+ break;
+ }
+ break;
+ case BRW_OPCODE_LRP:
+ if (inst->src[1].equals(inst->src[2])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[0] = inst->src[1];
+ inst->src[1] = reg_undef;
+ inst->src[2] = reg_undef;
+ progress = true;
+ break;
+ }
+ break;
+ case BRW_OPCODE_SEL:
+ if (inst->saturate && inst->src[1].file == IMM) {
+ switch (inst->conditional_mod) {
+ case BRW_CONDITIONAL_LE:
+ case BRW_CONDITIONAL_L:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f >= 1.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case BRW_CONDITIONAL_GE:
+ case BRW_CONDITIONAL_G:
+ switch (inst->src[1].type) {
+ case BRW_REGISTER_TYPE_F:
+ if (inst->src[1].imm.f <= 0.0f) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->conditional_mod = BRW_CONDITIONAL_NONE;
+ progress = true;
+ }
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ break;
default:
break;
}
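The new SEL cases above depend on saturation making the min/max against the immediate redundant; a minimal scalar check of that equivalence (illustrative only, not part of the patch):

#include <algorithm>
#include <cassert>

static float sat(float x) { return std::min(std::max(x, 0.0f), 1.0f); }

int main() {
   for (float x : {-2.0f, 0.3f, 5.0f}) {
      /* sel.sat.l  dst, x, 1.0f  ==  mov.sat dst, x */
      assert(sat(std::min(x, 1.0f)) == sat(x));
      /* sel.sat.ge dst, x, 0.0f  ==  mov.sat dst, x */
      assert(sat(std::max(x, 0.0f)) == sat(x));
   }
   return 0;
}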
foreach_list_safe(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
- if (inst->dst.file == GRF) {
- assert(this->virtual_grf_end[inst->dst.reg] >= pc);
- if (this->virtual_grf_end[inst->dst.reg] == pc) {
+ if (inst->dst.file == GRF && !inst->has_side_effects()) {
+ bool dead = true;
+
+ for (int i = 0; i < inst->regs_written; i++) {
+ int var = live_intervals->var_from_vgrf[inst->dst.reg];
+ assert(live_intervals->end[var + inst->dst.reg_offset + i] >= pc);
+ if (live_intervals->end[var + inst->dst.reg_offset + i] != pc) {
+ dead = false;
+ break;
+ }
+ }
+
+ if (dead) {
/* Don't dead code eliminate instructions that write to the
* accumulator as a side-effect. Instead just set the destination
* to the null register to free it.
break;
default:
inst->remove();
+ progress = true;
break;
}
- progress = true;
}
}
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
ht = _mesa_hash_table_create(mem_ctx, dead_code_hash_compare);
+ if (ht == NULL) {
+ return false;
+ }
+
foreach_list_safe(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
get_dead_code_hash_entry(ht, inst->dst.reg,
inst->dst.reg_offset);
- if (inst->is_partial_write()) {
- /* For a partial write, we can't remove any previous dead code
- * candidate, since we're just modifying their result, but we can
- * be dead code eliminiated ourselves.
- */
- if (entry) {
- entry->data = inst;
+ if (entry) {
+ if (inst->is_partial_write()) {
+ /* For a partial write, we can't remove any previous dead code
+ * candidate, since we're just modifying their result.
+ */
} else {
- insert_dead_code_hash(ht, inst->dst.reg, inst->dst.reg_offset,
- inst);
- }
- } else {
- if (entry) {
/* We're completely updating a channel, and there was a
* previous write to the channel that wasn't read. Kill it!
*/
fs_inst *inst = (fs_inst *)entry->data;
inst->remove();
progress = true;
- _mesa_hash_table_remove(ht, entry);
}
+ _mesa_hash_table_remove(ht, entry);
+ }
+
+ if (!inst->has_side_effects())
insert_dead_code_hash(ht, inst->dst.reg, inst->dst.reg_offset,
inst);
- }
}
}
}
_mesa_hash_table_destroy(ht, NULL);
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
/**
- * Implements a second type of register coalescing: This one checks if
- * the two regs involved in a raw move don't interfere, in which case
- * they can both by stored in the same place and the MOV removed.
+ * Implements register coalescing: Checks if the two registers involved in a
+ * raw move don't interfere, in which case they can both be stored in the same
+ * place and the MOV removed.
+ *
+ * To do this, all uses of the source of the MOV in the shader are replaced
+ * with the destination of the MOV. For example:
+ *
+ * add vgrf3:F, vgrf1:F, vgrf2:F
+ * mov vgrf4:F, vgrf3:F
+ * mul vgrf5:F, vgrf5:F, vgrf4:F
+ *
+ * becomes
+ *
+ * add vgrf4:F, vgrf1:F, vgrf2:F
+ * mul vgrf5:F, vgrf5:F, vgrf4:F
*/
bool
-fs_visitor::register_coalesce_2()
+fs_visitor::register_coalesce()
{
bool progress = false;
calculate_live_intervals();
- foreach_list_safe(node, &this->instructions) {
+ int src_size = 0;
+ int channels_remaining = 0;
+ int reg_from = -1, reg_to = -1;
+ int reg_to_offset[MAX_SAMPLER_MESSAGE_SIZE];
+ fs_inst *mov[MAX_SAMPLER_MESSAGE_SIZE];
+
+ foreach_list(node, &this->instructions) {
fs_inst *inst = (fs_inst *)node;
if (inst->opcode != BRW_OPCODE_MOV ||
inst->src[0].file != GRF ||
inst->src[0].negate ||
inst->src[0].abs ||
- inst->src[0].smear != -1 ||
+ !inst->src[0].is_contiguous() ||
inst->dst.file != GRF ||
- inst->dst.type != inst->src[0].type ||
- virtual_grf_sizes[inst->src[0].reg] != 1 ||
- virtual_grf_interferes(inst->dst.reg, inst->src[0].reg)) {
+ inst->dst.type != inst->src[0].type) {
continue;
}
- int reg_from = inst->src[0].reg;
- assert(inst->src[0].reg_offset == 0);
- int reg_to = inst->dst.reg;
- int reg_to_offset = inst->dst.reg_offset;
-
- foreach_list(node, &this->instructions) {
- fs_inst *scan_inst = (fs_inst *)node;
+ if (virtual_grf_sizes[inst->src[0].reg] >
+ virtual_grf_sizes[inst->dst.reg])
+ continue;
- if (scan_inst->dst.file == GRF &&
- scan_inst->dst.reg == reg_from) {
- scan_inst->dst.reg = reg_to;
- scan_inst->dst.reg_offset = reg_to_offset;
- }
- for (int i = 0; i < 3; i++) {
- if (scan_inst->src[i].file == GRF &&
- scan_inst->src[i].reg == reg_from) {
- scan_inst->src[i].reg = reg_to;
- scan_inst->src[i].reg_offset = reg_to_offset;
- }
- }
- }
+ int var_from = live_intervals->var_from_reg(&inst->src[0]);
+ int var_to = live_intervals->var_from_reg(&inst->dst);
+
+ if (live_intervals->vars_interfere(var_from, var_to) &&
+ !inst->dst.equals(inst->src[0])) {
+
+ /* We know that the live ranges of A (var_from) and B (var_to)
+ * interfere because of the ->vars_interfere() call above. If the end
+ * of B's live range is after the end of A's range, then we know two
+ * things:
+ * - the start of B's live range must be in A's live range (since we
+ * already know the two ranges interfere, this is the only remaining
+ * possibility)
+ * - the interference isn't of the form we're looking for (where B is
+ * entirely inside A)
+ */
+ if (live_intervals->end[var_to] > live_intervals->end[var_from])
+ continue;
- inst->remove();
+ bool overwritten = false;
+ int scan_ip = -1;
- /* We don't need to recalculate live intervals inside the loop despite
- * flagging live_intervals_valid because we only use live intervals for
- * the interferes test, and we must have had a situation where the
- * intervals were:
- *
- * from to
- * ^
- * |
- * v
- * ^
- * |
- * v
- *
- * Some register R that might get coalesced with one of these two could
- * only be referencing "to", otherwise "from"'s range would have been
- * longer. R's range could also only start at the end of "to" or later,
- * otherwise it will conflict with "to" when we try to coalesce "to"
- * into Rw anyway.
- */
- live_intervals_valid = false;
+ foreach_list(n, &this->instructions) {
+ fs_inst *scan_inst = (fs_inst *)n;
+ scan_ip++;
- progress = true;
- continue;
- }
+ if (scan_inst->is_control_flow()) {
+ overwritten = true;
+ break;
+ }
- return progress;
-}
+ if (scan_ip <= live_intervals->start[var_to])
+ continue;
-bool
-fs_visitor::register_coalesce()
-{
- bool progress = false;
- int if_depth = 0;
- int loop_depth = 0;
+ if (scan_ip > live_intervals->end[var_to])
+ break;
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
+ if (scan_inst->dst.equals(inst->dst) ||
+ scan_inst->dst.equals(inst->src[0])) {
+ overwritten = true;
+ break;
+ }
+ }
- /* Make sure that we dominate the instructions we're going to
- * scan for interfering with our coalescing, or we won't have
- * scanned enough to see if anything interferes with our
- * coalescing. We don't dominate the following instructions if
- * we're in a loop or an if block.
- */
- switch (inst->opcode) {
- case BRW_OPCODE_DO:
- loop_depth++;
- break;
- case BRW_OPCODE_WHILE:
- loop_depth--;
- break;
- case BRW_OPCODE_IF:
- if_depth++;
- break;
- case BRW_OPCODE_ENDIF:
- if_depth--;
- break;
- default:
- break;
+ if (overwritten)
+ continue;
}
- if (loop_depth || if_depth)
- continue;
- if (inst->opcode != BRW_OPCODE_MOV ||
- inst->is_partial_write() ||
- inst->saturate ||
- inst->dst.file != GRF || (inst->src[0].file != GRF &&
- inst->src[0].file != UNIFORM)||
- inst->dst.type != inst->src[0].type)
- continue;
+ if (reg_from != inst->src[0].reg) {
+ reg_from = inst->src[0].reg;
- bool has_source_modifiers = (inst->src[0].abs ||
- inst->src[0].negate ||
- inst->src[0].smear != -1 ||
- inst->src[0].file == UNIFORM);
+ src_size = virtual_grf_sizes[inst->src[0].reg];
+ assert(src_size <= MAX_SAMPLER_MESSAGE_SIZE);
- /* Found a move of a GRF to a GRF. Let's see if we can coalesce
- * them: check for no writes to either one until the exit of the
- * program.
- */
- bool interfered = false;
-
- for (fs_inst *scan_inst = (fs_inst *)inst->next;
- !scan_inst->is_tail_sentinel();
- scan_inst = (fs_inst *)scan_inst->next) {
- if (scan_inst->dst.file == GRF) {
- if (scan_inst->overwrites_reg(inst->dst) ||
- scan_inst->overwrites_reg(inst->src[0])) {
- interfered = true;
- break;
- }
- }
+ channels_remaining = src_size;
+ memset(mov, 0, sizeof(mov));
- if (has_source_modifiers) {
- for (int i = 0; i < 3; i++) {
- if (scan_inst->src[i].file == GRF &&
- scan_inst->src[i].reg == inst->dst.reg &&
- scan_inst->src[i].reg_offset == inst->dst.reg_offset &&
- inst->dst.type != scan_inst->src[i].type)
- {
- interfered = true;
- break;
- }
- }
- }
+ reg_to = inst->dst.reg;
+ }
+ if (reg_to != inst->dst.reg)
+ continue;
- /* The gen6 MATH instruction can't handle source modifiers or
- * unusual register regions, so avoid coalescing those for
- * now. We should do something more specific.
- */
- if (has_source_modifiers && !can_do_source_mods(scan_inst)) {
- interfered = true;
- break;
- }
+ const int offset = inst->src[0].reg_offset;
+ reg_to_offset[offset] = inst->dst.reg_offset;
+ mov[offset] = inst;
+ channels_remaining--;
- /* The accumulator result appears to get used for the
- * conditional modifier generation. When negating a UD
- * value, there is a 33rd bit generated for the sign in the
- * accumulator value, so now you can't check, for example,
- * equality with a 32-bit value. See piglit fs-op-neg-uint.
- */
- if (scan_inst->conditional_mod &&
- inst->src[0].negate &&
- inst->src[0].type == BRW_REGISTER_TYPE_UD) {
- interfered = true;
- break;
- }
- }
- if (interfered) {
- continue;
+ if (channels_remaining)
+ continue;
+
+ bool removed = false;
+ for (int i = 0; i < src_size; i++) {
+ if (mov[i]) {
+ removed = true;
+
+ mov[i]->opcode = BRW_OPCODE_NOP;
+ mov[i]->conditional_mod = BRW_CONDITIONAL_NONE;
+ mov[i]->dst = reg_undef;
+ mov[i]->src[0] = reg_undef;
+ mov[i]->src[1] = reg_undef;
+ mov[i]->src[2] = reg_undef;
+ }
}
- /* Rewrite the later usage to point at the source of the move to
- * be removed.
- */
- for (fs_inst *scan_inst = inst;
- !scan_inst->is_tail_sentinel();
- scan_inst = (fs_inst *)scan_inst->next) {
- for (int i = 0; i < 3; i++) {
- if (scan_inst->src[i].file == GRF &&
- scan_inst->src[i].reg == inst->dst.reg &&
- scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
- fs_reg new_src = inst->src[0];
- if (scan_inst->src[i].abs) {
- new_src.negate = 0;
- new_src.abs = 1;
+ foreach_list(node, &this->instructions) {
+ fs_inst *scan_inst = (fs_inst *)node;
+
+ for (int i = 0; i < src_size; i++) {
+ if (mov[i]) {
+ if (scan_inst->dst.file == GRF &&
+ scan_inst->dst.reg == reg_from &&
+ scan_inst->dst.reg_offset == i) {
+ scan_inst->dst.reg = reg_to;
+ scan_inst->dst.reg_offset = reg_to_offset[i];
}
- new_src.negate ^= scan_inst->src[i].negate;
- scan_inst->src[i] = new_src;
- }
- }
+ for (int j = 0; j < 3; j++) {
+ if (scan_inst->src[j].file == GRF &&
+ scan_inst->src[j].reg == reg_from &&
+ scan_inst->src[j].reg_offset == i) {
+ scan_inst->src[j].reg = reg_to;
+ scan_inst->src[j].reg_offset = reg_to_offset[i];
+ }
+ }
+ }
+ }
}
- inst->remove();
- progress = true;
+ if (removed) {
+ live_intervals->start[var_to] = MIN2(live_intervals->start[var_to],
+ live_intervals->start[var_from]);
+ live_intervals->end[var_to] = MAX2(live_intervals->end[var_to],
+ live_intervals->end[var_from]);
+ reg_from = -1;
+ }
+ }
+
+ foreach_list_safe(node, &this->instructions) {
+ fs_inst *inst = (fs_inst *)node;
+
+ if (inst->opcode == BRW_OPCODE_NOP) {
+ inst->remove();
+ progress = true;
+ }
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
-
bool
fs_visitor::compute_to_mrf()
{
inst->is_partial_write() ||
inst->dst.file != MRF || inst->src[0].file != GRF ||
inst->dst.type != inst->src[0].type ||
- inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
+ inst->src[0].abs || inst->src[0].negate ||
+ !inst->src[0].is_contiguous() ||
+ inst->src[0].subreg_offset)
continue;
/* Work out which hardware MRF registers are written by this
}
}
- if (scan_inst->mlen > 0) {
+ if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
/* Found a SEND instruction, which means that there are
* live values in MRFs from base_mrf to base_mrf +
* scan_inst->mlen - 1. Don't go pushing our MRF write up
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
last_mrf_move[inst->dst.reg] = NULL;
}
- if (inst->mlen > 0) {
+ if (inst->mlen > 0 && inst->base_mrf != -1) {
/* Found a SEND instruction, which will include two or fewer
* implied MRF writes. We could do better here.
*/
}
if (progress)
- live_intervals_valid = false;
+ invalidate_live_intervals();
return progress;
}
clear_deps_for_inst_src(fs_inst *inst, int dispatch_width, bool *deps,
int first_grf, int grf_len)
{
- bool inst_16wide = (dispatch_width > 8 &&
+ bool inst_simd16 = (dispatch_width > 8 &&
!inst->force_uncompressed &&
!inst->force_sechalf);
if (grf >= first_grf &&
grf < first_grf + grf_len) {
deps[grf - first_grf] = false;
- if (inst_16wide)
+ if (inst_simd16)
deps[grf - first_grf + 1] = false;
}
}
return;
}
- bool scan_inst_16wide = (dispatch_width > 8 &&
+ bool scan_inst_simd16 = (dispatch_width > 8 &&
!scan_inst->force_uncompressed &&
!scan_inst->force_sechalf);
needs_dep[reg - first_write_grf]) {
inst->insert_before(DEP_RESOLVE_MOV(reg));
needs_dep[reg - first_write_grf] = false;
- if (scan_inst_16wide)
+ if (scan_inst_simd16)
needs_dep[reg - first_write_grf + 1] = false;
}
}
inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
inst->src[1] = payload;
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
} else {
/* Before register allocation, we didn't tell the scheduler about the
* MRF we use. We know it's safe to use this MRF because nothing
}
}
+void
+fs_visitor::dump_instructions()
+{
+ calculate_register_pressure();
+
+ int ip = 0, max_pressure = 0;
+ foreach_list(node, &this->instructions) {
+ backend_instruction *inst = (backend_instruction *)node;
+ max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
+ fprintf(stderr, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
+ dump_instruction(inst);
+ ++ip;
+ }
+ fprintf(stderr, "Maximum %3d registers live at once.\n", max_pressure);
+}
+
void
fs_visitor::dump_instruction(backend_instruction *be_inst)
{
fs_inst *inst = (fs_inst *)be_inst;
if (inst->predicate) {
- printf("(%cf0.%d) ",
+ fprintf(stderr, "(%cf0.%d) ",
inst->predicate_inverse ? '-' : '+',
inst->flag_subreg);
}
- printf("%s", brw_instruction_name(inst->opcode));
+ fprintf(stderr, "%s", brw_instruction_name(inst->opcode));
if (inst->saturate)
- printf(".sat");
+ fprintf(stderr, ".sat");
if (inst->conditional_mod) {
- printf(".cmod");
+ fprintf(stderr, "%s", conditional_modifier[inst->conditional_mod]);
if (!inst->predicate &&
(brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
- printf(".f0.%d", inst->flag_subreg);
+ fprintf(stderr, ".f0.%d", inst->flag_subreg);
}
}
- printf(" ");
+ fprintf(stderr, " ");
switch (inst->dst.file) {
case GRF:
- printf("vgrf%d", inst->dst.reg);
- if (inst->dst.reg_offset)
- printf("+%d", inst->dst.reg_offset);
+ fprintf(stderr, "vgrf%d", inst->dst.reg);
+ if (virtual_grf_sizes[inst->dst.reg] != 1 ||
+ inst->dst.subreg_offset)
+ fprintf(stderr, "+%d.%d",
+ inst->dst.reg_offset, inst->dst.subreg_offset);
break;
case MRF:
- printf("m%d", inst->dst.reg);
+ fprintf(stderr, "m%d", inst->dst.reg);
break;
case BAD_FILE:
- printf("(null)");
+ fprintf(stderr, "(null)");
break;
case UNIFORM:
- printf("***u%d***", inst->dst.reg);
+ fprintf(stderr, "***u%d***", inst->dst.reg);
+ break;
+ case HW_REG:
+ if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
+ switch (inst->dst.fixed_hw_reg.nr) {
+ case BRW_ARF_NULL:
+ fprintf(stderr, "null");
+ break;
+ case BRW_ARF_ADDRESS:
+ fprintf(stderr, "a0.%d", inst->dst.fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_ACCUMULATOR:
+ fprintf(stderr, "acc%d", inst->dst.fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_FLAG:
+ fprintf(stderr, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ inst->dst.fixed_hw_reg.subnr);
+ break;
+ default:
+ fprintf(stderr, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ inst->dst.fixed_hw_reg.subnr);
+ break;
+ }
+ } else {
+ fprintf(stderr, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
+ }
+ if (inst->dst.fixed_hw_reg.subnr)
+ fprintf(stderr, "+%d", inst->dst.fixed_hw_reg.subnr);
break;
default:
- printf("???");
+ fprintf(stderr, "???");
break;
}
- printf(", ");
+ fprintf(stderr, ":%s, ", brw_reg_type_letters(inst->dst.type));
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
if (inst->src[i].negate)
- printf("-");
+ fprintf(stderr, "-");
if (inst->src[i].abs)
- printf("|");
+ fprintf(stderr, "|");
switch (inst->src[i].file) {
case GRF:
- printf("vgrf%d", inst->src[i].reg);
- if (inst->src[i].reg_offset)
- printf("+%d", inst->src[i].reg_offset);
+ fprintf(stderr, "vgrf%d", inst->src[i].reg);
+ if (virtual_grf_sizes[inst->src[i].reg] != 1 ||
+ inst->src[i].subreg_offset)
+ fprintf(stderr, "+%d.%d", inst->src[i].reg_offset,
+ inst->src[i].subreg_offset);
break;
case MRF:
- printf("***m%d***", inst->src[i].reg);
+ fprintf(stderr, "***m%d***", inst->src[i].reg);
break;
case UNIFORM:
- printf("u%d", inst->src[i].reg);
- if (inst->src[i].reg_offset)
- printf(".%d", inst->src[i].reg_offset);
+ fprintf(stderr, "u%d", inst->src[i].reg);
+ if (inst->src[i].reladdr) {
+ fprintf(stderr, "+reladdr");
+ } else if (virtual_grf_sizes[inst->src[i].reg] != 1 ||
+ inst->src[i].subreg_offset) {
+ fprintf(stderr, "+%d.%d", inst->src[i].reg_offset,
+ inst->src[i].subreg_offset);
+ }
break;
case BAD_FILE:
- printf("(null)");
+ fprintf(stderr, "(null)");
break;
case IMM:
switch (inst->src[i].type) {
case BRW_REGISTER_TYPE_F:
- printf("%ff", inst->src[i].imm.f);
+ fprintf(stderr, "%ff", inst->src[i].imm.f);
break;
case BRW_REGISTER_TYPE_D:
- printf("%dd", inst->src[i].imm.i);
+ fprintf(stderr, "%dd", inst->src[i].imm.i);
break;
case BRW_REGISTER_TYPE_UD:
- printf("%uu", inst->src[i].imm.u);
+ fprintf(stderr, "%uu", inst->src[i].imm.u);
break;
default:
- printf("???");
+ fprintf(stderr, "???");
break;
}
break;
+ case HW_REG:
+ if (inst->src[i].fixed_hw_reg.negate)
+ fprintf(stderr, "-");
+ if (inst->src[i].fixed_hw_reg.abs)
+ fprintf(stderr, "|");
+ if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
+ switch (inst->src[i].fixed_hw_reg.nr) {
+ case BRW_ARF_NULL:
+ fprintf(stderr, "null");
+ break;
+ case BRW_ARF_ADDRESS:
+ fprintf(stderr, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_ACCUMULATOR:
+ fprintf(stderr, "acc%d", inst->src[i].fixed_hw_reg.subnr);
+ break;
+ case BRW_ARF_FLAG:
+ fprintf(stderr, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ inst->src[i].fixed_hw_reg.subnr);
+ break;
+ default:
+ fprintf(stderr, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ inst->src[i].fixed_hw_reg.subnr);
+ break;
+ }
+ } else {
+ fprintf(stderr, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
+ }
+ if (inst->src[i].fixed_hw_reg.subnr)
+ fprintf(stderr, "+%d", inst->src[i].fixed_hw_reg.subnr);
+ if (inst->src[i].fixed_hw_reg.abs)
+ fprintf(stderr, "|");
+ break;
default:
- printf("???");
+ fprintf(stderr, "???");
break;
}
if (inst->src[i].abs)
- printf("|");
+ fprintf(stderr, "|");
- if (i < 3)
- printf(", ");
+ if (inst->src[i].file != IMM) {
+ fprintf(stderr, ":%s", brw_reg_type_letters(inst->src[i].type));
+ }
+
+ if (i < 2 && inst->src[i + 1].file != BAD_FILE)
+ fprintf(stderr, ", ");
}
- printf(" ");
+ fprintf(stderr, " ");
if (inst->force_uncompressed)
- printf("1sthalf ");
+ fprintf(stderr, "1sthalf ");
if (inst->force_sechalf)
- printf("2ndhalf ");
+ fprintf(stderr, "2ndhalf ");
- printf("\n");
+ fprintf(stderr, "\n");
}
/**
fs_inst *
fs_visitor::get_instruction_generating_reg(fs_inst *start,
fs_inst *end,
- fs_reg reg)
+ const fs_reg ®)
{
if (end == start ||
end->is_partial_write() ||
c->source_depth_reg = c->nr_payload_regs;
c->nr_payload_regs++;
if (dispatch_width == 16) {
- /* R28: interpolated depth if not 8-wide. */
+ /* R28: interpolated depth if not SIMD8. */
c->nr_payload_regs++;
}
}
c->source_w_reg = c->nr_payload_regs;
c->nr_payload_regs++;
if (dispatch_width == 16) {
- /* R30: interpolated W if not 8-wide. */
+ /* R30: interpolated W if not SIMD8. */
c->nr_payload_regs++;
}
}
+
+ c->prog_data.uses_pos_offset = c->key.compute_pos_offset;
/* R31: MSAA position offsets. */
- /* R32-: bary for 32-pixel. */
+ if (c->prog_data.uses_pos_offset) {
+ c->sample_pos_reg = c->nr_payload_regs;
+ c->nr_payload_regs++;
+ }
+
+ /* R32: MSAA input coverage mask */
+ if (fp->Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN) {
+ assert(brw->gen >= 7);
+ c->sample_mask_reg = c->nr_payload_regs;
+ c->nr_payload_regs++;
+ if (dispatch_width == 16) {
+ /* R33: input coverage mask if not SIMD8. */
+ c->nr_payload_regs++;
+ }
+ }
+
+ /* R34-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
}
}
+void
+fs_visitor::assign_binding_table_offsets()
+{
+ uint32_t next_binding_table_offset = 0;
+
+ /* If there are no color regions, we still perform an FB write to a null
+ * renderbuffer, which we place at surface index 0.
+ */
+ c->prog_data.binding_table.render_target_start = next_binding_table_offset;
+ next_binding_table_offset += MAX2(c->key.nr_color_regions, 1);
+
+ assign_common_binding_table_offsets(next_binding_table_offset);
+}
+
+void
+fs_visitor::calculate_register_pressure()
+{
+ calculate_live_intervals();
+
+ int num_instructions = 0;
+ foreach_list(node, &this->instructions) {
+ ++num_instructions;
+ }
+
+ regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
+
+ for (int reg = 0; reg < virtual_grf_count; reg++) {
+ for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
+ regs_live_at_ip[ip] += virtual_grf_sizes[reg];
+ }
+}
+
+/**
+ * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
+ *
+ * The needs_unlit_centroid_workaround ends up producing one of these per
+ * channel of centroid input, so it's good to clean them up.
+ *
+ * An assumption here is that nothing ever modifies the dispatched pixels
+ * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
+ * dictates that anyway.
+ */
+void
+fs_visitor::opt_drop_redundant_mov_to_flags()
+{
+ bool flag_mov_found[2] = {false};
+
+ foreach_list_safe(node, &this->instructions) {
+ fs_inst *inst = (fs_inst *)node;
+
+ if (inst->is_control_flow()) {
+ memset(flag_mov_found, 0, sizeof(flag_mov_found));
+ } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
+ if (!flag_mov_found[inst->flag_subreg])
+ flag_mov_found[inst->flag_subreg] = true;
+ else
+ inst->remove();
+ } else if (inst->writes_flag()) {
+ flag_mov_found[inst->flag_subreg] = false;
+ }
+ }
+}
+
bool
fs_visitor::run()
{
sanity_param_count = fp->Base.Parameters->NumParameters;
- uint32_t orig_nr_params = c->prog_data.nr_params;
+ bool allocated_without_spills;
+
+ assign_binding_table_offsets();
if (brw->gen >= 6)
setup_payload_gen6();
emit_shader_time_begin();
calculate_urb_setup();
- if (brw->gen < 6)
- emit_interpolation_setup_gen4();
- else
- emit_interpolation_setup_gen6();
+ if (fp->Base.InputsRead > 0) {
+ if (brw->gen < 6)
+ emit_interpolation_setup_gen4();
+ else
+ emit_interpolation_setup_gen6();
+ }
/* We handle discards by keeping track of the still-live pixels in f0.1.
* Initialize it with the dispatched pixels.
*/
- if (fp->UsesKill) {
+ if (fp->UsesKill || c->key.alpha_test_func) {
fs_inst *discard_init = emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
discard_init->flag_subreg = 1;
}
* functions called "main").
*/
if (shader) {
- foreach_list(node, &*shader->ir) {
+ foreach_list(node, &*shader->base.ir) {
ir_instruction *ir = (ir_instruction *)node;
base_ir = ir;
this->result = reg_undef;
emit(FS_OPCODE_PLACEHOLDER_HALT);
+ if (c->key.alpha_test_func)
+ emit_alpha_test();
+
emit_fb_writes();
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
+ remove_dead_constants();
setup_pull_constants();
+ opt_drop_redundant_mov_to_flags();
+
bool progress;
do {
progress = false;
progress = opt_algebraic() || progress;
progress = opt_cse() || progress;
progress = opt_copy_propagate() || progress;
+ progress = opt_peephole_predicated_break() || progress;
progress = dead_code_eliminate() || progress;
progress = dead_code_eliminate_local() || progress;
- progress = register_coalesce() || progress;
- progress = register_coalesce_2() || progress;
+ progress = opt_peephole_sel() || progress;
+ progress = dead_control_flow_eliminate(this) || progress;
+ progress = opt_saturate_propagation() || progress;
+ progress = register_coalesce() || progress;
progress = compute_to_mrf() || progress;
} while (progress);
- remove_dead_constants();
-
- schedule_instructions(false);
-
lower_uniform_pull_constant_loads();
assign_curb_setup();
assign_urb_setup();
- if (0) {
- /* Debug of register spilling: Go spill everything. */
- for (int i = 0; i < virtual_grf_count; i++) {
- spill_reg(i);
- }
+ static enum instruction_scheduler_mode pre_modes[] = {
+ SCHEDULE_PRE,
+ SCHEDULE_PRE_NON_LIFO,
+ SCHEDULE_PRE_LIFO,
+ };
+
+ /* Try each scheduling heuristic to see if it can successfully register
+ * allocate without spilling. They should be ordered by decreasing
+ * performance but increasing likelihood of allocating.
+ */
+ for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
+ schedule_instructions(pre_modes[i]);
+
+ if (0) {
+ assign_regs_trivial();
+ allocated_without_spills = true;
+ } else {
+ allocated_without_spills = assign_regs(false);
+ }
+ if (allocated_without_spills)
+ break;
}
- if (0)
- assign_regs_trivial();
- else {
- while (!assign_regs()) {
- if (failed)
- break;
- }
+ if (!allocated_without_spills) {
+ /* We assume that any spilling is worse than just dropping back to
+ * SIMD8. There's probably actually some intermediate point where
+ * SIMD16 with a couple of spills is still better.
+ */
+ if (dispatch_width == 16) {
+ fail("Failure to register allocate. Reduce number of "
+ "live scalar values to avoid this.");
+ }
+
+ /* Since we're out of heuristics, just go spill registers until we
+ * get an allocation.
+ */
+ while (!assign_regs(true)) {
+ if (failed)
+ break;
+ }
}
}
assert(force_uncompressed_stack == 0);
- assert(force_sechalf_stack == 0);
/* This must come after all optimization and register allocation, since
* it inserts dead code that happens to have side effects, and it does
if (failed)
return false;
- schedule_instructions(true);
+ if (!allocated_without_spills)
+ schedule_instructions(SCHEDULE_POST);
- if (dispatch_width == 8) {
+ if (dispatch_width == 8)
c->prog_data.reg_blocks = brw_register_blocks(grf_used);
- } else {
+ else
c->prog_data.reg_blocks_16 = brw_register_blocks(grf_used);
- /* Make sure we didn't try to sneak in an extra uniform */
- assert(orig_nr_params == c->prog_data.nr_params);
- (void) orig_nr_params;
- }
-
/* If any state parameters were appended, then ParameterValues could have
* been realloced, in which case the driver uniform storage set up by
* _mesa_associate_uniform_storage() would point to freed memory. Make
unsigned *final_assembly_size)
{
bool start_busy = false;
- float start_time = 0;
+ double start_time = 0;
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
if (prog)
shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- if (prog) {
- printf("GLSL IR for native fragment shader %d:\n", prog->Name);
- _mesa_print_ir(shader->ir, NULL);
- printf("\n\n");
- } else {
- printf("ARB_fragment_program %d ir for native fragment shader\n",
- fp->Base.Id);
- _mesa_print_program(&fp->Base);
- }
- }
+ if (unlikely(INTEL_DEBUG & DEBUG_WM))
+ brw_dump_ir(brw, "fragment", prog, &shader->base, &fp->Base);
/* Now the main event: Visit the shader IR and generate our FS IR for it.
*/
exec_list *simd16_instructions = NULL;
fs_visitor v2(brw, c, prog, fp, 16);
if (brw->gen >= 5 && likely(!(INTEL_DEBUG & DEBUG_NO16))) {
- if (c->prog_data.nr_pull_params == 0) {
- /* Try a 16-wide compile */
+ if (c->prog_data.base.nr_pull_params == 0) {
+ /* Try a SIMD16 compile */
v2.import_uniforms(&v);
if (!v2.run()) {
- perf_debug("16-wide shader failed to compile, falling back to "
- "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
+ perf_debug("SIMD16 shader failed to compile, falling back to "
+ "SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
} else {
simd16_instructions = &v2.instructions;
}
} else {
- perf_debug("Skipping 16-wide due to pull parameters.\n");
+ perf_debug("Skipping SIMD16 due to pull parameters.\n");
}
}
- c->prog_data.dispatch_width = 8;
-
- fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
- const unsigned *generated = g.generate_assembly(&v.instructions,
- simd16_instructions,
- final_assembly_size);
+ const unsigned *assembly = NULL;
+ if (brw->gen >= 8) {
+ gen8_fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
+ assembly = g.generate_assembly(&v.instructions, simd16_instructions,
+ final_assembly_size);
+ } else {
+ fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
+ assembly = g.generate_assembly(&v.instructions, simd16_instructions,
+ final_assembly_size);
+ }
if (unlikely(brw->perf_debug) && shader) {
if (shader->compiled_once)
}
}
- return generated;
+ return assembly;
}
bool
key.drawable_height = ctx->DrawBuffer->Height;
}
+ key.nr_color_regions = _mesa_bitcount_64(fp->Base.OutputsWritten &
+ ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
+ BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));
+
if ((fp->Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
- key.render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
+ key.render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer) ||
+ key.nr_color_regions > 1;
}
- key.nr_color_regions = 1;
-
/* GL_FRAGMENT_SHADER_DERIVATIVE_HINT is almost always GL_DONT_CARE. The
* quality of the derivatives is likely to be determined by the driconf
* option.