#include "glsl/glsl_types.h"
void
-fs_inst::init()
+fs_inst::init(enum opcode opcode, const fs_reg &dst, fs_reg *src, int sources)
{
memset(this, 0, sizeof(*this));
- this->opcode = BRW_OPCODE_NOP;
- this->conditional_mod = BRW_CONDITIONAL_NONE;
- this->dst = reg_undef;
- this->src[0] = reg_undef;
- this->src[1] = reg_undef;
- this->src[2] = reg_undef;
+ this->opcode = opcode;
+ this->dst = dst;
+ this->src = src;
+ this->sources = sources;
+
+ this->conditional_mod = BRW_CONDITIONAL_NONE;
/* This will be the case for almost all instructions. */
this->regs_written = 1;
+
+ this->writes_accumulator = false;
}
-fs_inst::fs_inst()
+fs_inst::fs_inst(enum opcode opcode, const fs_reg &dst)
{
- init();
+ fs_reg *src = ralloc_array(this, fs_reg, 3);
+ init(opcode, dst, src, 0);
}
-fs_inst::fs_inst(enum opcode opcode)
+fs_inst::fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0)
{
- init();
- this->opcode = opcode;
+ fs_reg *src = ralloc_array(this, fs_reg, 3);
+ src[0] = src0;
+ init(opcode, dst, src, 1);
}
-fs_inst::fs_inst(enum opcode opcode, fs_reg dst)
+fs_inst::fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
+ const fs_reg &src1)
{
- init();
- this->opcode = opcode;
- this->dst = dst;
-
- if (dst.file == GRF)
- assert(dst.reg_offset >= 0);
+ fs_reg *src = ralloc_array(this, fs_reg, 3);
+ src[0] = src0;
+ src[1] = src1;
+ init(opcode, dst, src, 2);
}
-fs_inst::fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0)
+fs_inst::fs_inst(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
+ const fs_reg &src1, const fs_reg &src2)
{
- init();
- this->opcode = opcode;
- this->dst = dst;
- this->src[0] = src0;
+ fs_reg *src = ralloc_array(this, fs_reg, 3);
+ src[0] = src0;
+ src[1] = src1;
+ src[2] = src2;
+ init(opcode, dst, src, 3);
+}
- if (dst.file == GRF)
- assert(dst.reg_offset >= 0);
- if (src[0].file == GRF)
- assert(src[0].reg_offset >= 0);
+fs_inst::fs_inst(enum opcode opcode, const fs_reg &dst, fs_reg src[], int sources)
+{
+ init(opcode, dst, src, sources);
}
-fs_inst::fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
+fs_inst::fs_inst(const fs_inst &that)
{
- init();
- this->opcode = opcode;
- this->dst = dst;
- this->src[0] = src0;
- this->src[1] = src1;
+ memcpy(this, &that, sizeof(that));
+
+ this->src = ralloc_array(this, fs_reg, that.sources);
- if (dst.file == GRF)
- assert(dst.reg_offset >= 0);
- if (src[0].file == GRF)
- assert(src[0].reg_offset >= 0);
- if (src[1].file == GRF)
- assert(src[1].reg_offset >= 0);
+ for (int i = 0; i < that.sources; i++)
+ this->src[i] = that.src[i];
}
-fs_inst::fs_inst(enum opcode opcode, fs_reg dst,
- fs_reg src0, fs_reg src1, fs_reg src2)
+void
+fs_inst::resize_sources(uint8_t num_sources)
{
- init();
- this->opcode = opcode;
- this->dst = dst;
- this->src[0] = src0;
- this->src[1] = src1;
- this->src[2] = src2;
-
- if (dst.file == GRF)
- assert(dst.reg_offset >= 0);
- if (src[0].file == GRF)
- assert(src[0].reg_offset >= 0);
- if (src[1].file == GRF)
- assert(src[1].reg_offset >= 0);
- if (src[2].file == GRF)
- assert(src[2].reg_offset >= 0);
+ if (this->sources != num_sources) {
+ this->src = reralloc(this, this->src, fs_reg, num_sources);
+ this->sources = num_sources;
+ }
}
#define ALU1(op) \
fs_inst * \
- fs_visitor::op(fs_reg dst, fs_reg src0) \
+ fs_visitor::op(const fs_reg &dst, const fs_reg &src0) \
{ \
return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0); \
}
#define ALU2(op) \
fs_inst * \
- fs_visitor::op(fs_reg dst, fs_reg src0, fs_reg src1) \
+ fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
+ const fs_reg &src1) \
{ \
return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1); \
}
+#define ALU2_ACC(op) \
+ fs_inst * \
+ fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
+ const fs_reg &src1) \
+ { \
+ fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1);\
+ inst->writes_accumulator = true; \
+ return inst; \
+ }
+
#define ALU3(op) \
fs_inst * \
- fs_visitor::op(fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2) \
+ fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
+ const fs_reg &src1, const fs_reg &src2) \
{ \
return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1, src2);\
}
ALU1(RNDZ)
ALU2(ADD)
ALU2(MUL)
-ALU2(MACH)
+ALU2_ACC(MACH)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU1(FBL)
ALU1(CBIT)
ALU3(MAD)
-ALU2(ADDC)
-ALU2(SUBB)
+ALU2_ACC(ADDC)
+ALU2_ACC(SUBB)
ALU2(SEL)
+ALU2(MAC)
/** Gen4 predicated IF. */
fs_inst *
-fs_visitor::IF(uint32_t predicate)
+fs_visitor::IF(enum brw_predicate predicate)
{
fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF);
inst->predicate = predicate;
/** Gen6 IF with embedded comparison. */
fs_inst *
-fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition)
+fs_visitor::IF(const fs_reg &src0, const fs_reg &src1,
+ enum brw_conditional_mod condition)
{
assert(brw->gen == 6);
fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF,
* the flag register with the packed 16 bits of the result.
*/
fs_inst *
-fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1, uint32_t condition)
+fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1,
+ enum brw_conditional_mod condition)
{
fs_inst *inst;
return inst;
}
+fs_inst *
+fs_visitor::LOAD_PAYLOAD(const fs_reg &dst, fs_reg *src, int sources)
+{
+ fs_inst *inst = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD, dst, src,
+ sources);
+ inst->regs_written = sources;
+
+ return inst;
+}
+
exec_list
-fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index,
- fs_reg varying_offset,
+fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_reg &dst,
+ const fs_reg &surf_index,
+ const fs_reg &varying_offset,
uint32_t const_offset)
{
exec_list instructions;
}
bool
-fs_inst::equals(fs_inst *inst)
+fs_inst::equals(fs_inst *inst) const
{
return (opcode == inst->opcode &&
dst.equals(inst->dst) &&
}
bool
-fs_inst::overwrites_reg(const fs_reg &reg)
+fs_inst::overwrites_reg(const fs_reg &reg) const
{
return (reg.file == dst.file &&
reg.reg == dst.reg &&
}
bool
-fs_inst::is_send_from_grf()
+fs_inst::is_send_from_grf() const
{
return (opcode == FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7 ||
opcode == SHADER_OPCODE_SHADER_TIME_ADD ||
+ opcode == FS_OPCODE_INTERPOLATE_AT_CENTROID ||
+ opcode == FS_OPCODE_INTERPOLATE_AT_SAMPLE ||
+ opcode == FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET ||
+ opcode == FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET ||
(opcode == FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD &&
src[1].file == GRF) ||
(is_tex() && src[0].file == GRF));
}
bool
-fs_visitor::can_do_source_mods(fs_inst *inst)
+fs_inst::can_do_source_mods(struct brw_context *brw)
{
- if (brw->gen == 6 && inst->is_math())
+ if (brw->gen == 6 && is_math())
return false;
- if (inst->is_send_from_grf())
+ if (is_send_from_grf())
return false;
- if (!inst->can_do_source_mods())
+ if (!backend_instruction::can_do_source_mods())
return false;
return true;
fs_reg::init()
{
memset(this, 0, sizeof(*this));
- this->smear = -1;
stride = 1;
}
init();
this->file = IMM;
this->type = BRW_REGISTER_TYPE_F;
- this->imm.f = f;
+ this->fixed_hw_reg.dw1.f = f;
}
/** Immediate value constructor. */
init();
this->file = IMM;
this->type = BRW_REGISTER_TYPE_D;
- this->imm.i = i;
+ this->fixed_hw_reg.dw1.d = i;
}
/** Immediate value constructor. */
init();
this->file = IMM;
this->type = BRW_REGISTER_TYPE_UD;
- this->imm.u = u;
+ this->fixed_hw_reg.dw1.ud = u;
}
/** Fixed brw_reg. */
!reladdr && !r.reladdr &&
memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
sizeof(fixed_hw_reg)) == 0 &&
- smear == r.smear &&
- stride == r.stride &&
- imm.u == r.imm.u);
-}
-
-fs_reg
-fs_reg::retype(uint32_t type)
-{
- fs_reg result = *this;
- result.type = type;
- return result;
+ stride == r.stride);
}
fs_reg &
return *this;
}
-bool
-fs_reg::is_contiguous() const
-{
- return stride == 1;
-}
-
-bool
-fs_reg::is_zero() const
-{
- if (file != IMM)
- return false;
-
- return type == BRW_REGISTER_TYPE_F ? imm.f == 0.0 : imm.i == 0;
-}
-
-bool
-fs_reg::is_one() const
+fs_reg &
+fs_reg::set_smear(unsigned subreg)
{
- if (file != IMM)
- return false;
-
- return type == BRW_REGISTER_TYPE_F ? imm.f == 1.0 : imm.i == 1;
+ assert(file != HW_REG && file != IMM);
+ subreg_offset = subreg * type_sz(type);
+ stride = 0;
+ return *this;
}
bool
-fs_reg::is_null() const
+fs_reg::is_contiguous() const
{
- return file == HW_REG &&
- fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
- fixed_hw_reg.nr == BRW_ARF_NULL;
+ return stride == 1;
}
bool
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
- assert(!"not reached");
- break;
+ unreachable("not reached");
}
return 0;
* else that might disrupt timing) by setting smear to 2 and checking if
* that field is != 0.
*/
- dst.smear = 0;
+ dst.set_smear(0);
return dst;
}
* were the only two timestamp reads that happened).
*/
fs_reg reset = shader_end_time;
- reset.smear = 2;
+ reset.set_smear(2);
fs_inst *test = emit(AND(reg_null_d, reset, fs_reg(1u)));
test->conditional_mod = BRW_CONDITIONAL_Z;
emit(IF(BRW_PREDICATE_NORMAL));
else
payload = fs_reg(this, glsl_type::uint_type);
- emit(fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
- fs_reg(), payload, offset, value));
+ emit(new(mem_ctx) fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
+ fs_reg(), payload, offset, value));
}
void
-fs_visitor::fail(const char *format, ...)
+fs_visitor::vfail(const char *format, va_list va)
{
- va_list va;
char *msg;
if (failed)
failed = true;
- va_start(va, format);
msg = ralloc_vasprintf(mem_ctx, format, va);
- va_end(va);
msg = ralloc_asprintf(mem_ctx, "FS compile failed: %s\n", msg);
this->fail_msg = msg;
}
}
+void
+fs_visitor::fail(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ vfail(format, va);
+ va_end(va);
+}
+
+/**
+ * Mark this program as impossible to compile in SIMD16 mode.
+ *
+ * During the SIMD8 compile (which happens first), we can detect and flag
+ * things that are unsupported in SIMD16 mode, so the compiler can skip
+ * the SIMD16 compile altogether.
+ *
+ * During a SIMD16 compile (if one happens anyway), this just calls fail().
+ */
+void
+fs_visitor::no16(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+
+ if (dispatch_width == 16) {
+ vfail(format, va);
+ } else {
+ simd16_unsupported = true;
+
+ if (brw->perf_debug) {
+ if (no16_msg)
+ ralloc_vasprintf_append(&no16_msg, format, va);
+ else
+ no16_msg = ralloc_vasprintf(mem_ctx, format, va);
+ }
+ }
+
+ va_end(va);
+}
+
fs_inst *
fs_visitor::emit(enum opcode opcode)
{
- return emit(fs_inst(opcode));
+ return emit(new(mem_ctx) fs_inst(opcode));
+}
+
+fs_inst *
+fs_visitor::emit(enum opcode opcode, const fs_reg &dst)
+{
+ return emit(new(mem_ctx) fs_inst(opcode, dst));
}
fs_inst *
-fs_visitor::emit(enum opcode opcode, fs_reg dst)
+fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0)
{
- return emit(fs_inst(opcode, dst));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0));
}
fs_inst *
-fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0)
+fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
+ const fs_reg &src1)
{
- return emit(fs_inst(opcode, dst, src0));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1));
}
fs_inst *
-fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
+fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
+ const fs_reg &src1, const fs_reg &src2)
{
- return emit(fs_inst(opcode, dst, src0, src1));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1, src2));
}
fs_inst *
-fs_visitor::emit(enum opcode opcode, fs_reg dst,
- fs_reg src0, fs_reg src1, fs_reg src2)
+fs_visitor::emit(enum opcode opcode, const fs_reg &dst,
+ fs_reg src[], int sources)
{
- return emit(fs_inst(opcode, dst, src0, src1, src2));
+ return emit(new(mem_ctx) fs_inst(opcode, dst, src, sources));
}
void
* it.
*/
bool
-fs_inst::is_partial_write()
+fs_inst::is_partial_write() const
{
return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
this->force_uncompressed ||
}
int
-fs_inst::regs_read(fs_visitor *v, int arg)
+fs_inst::regs_read(fs_visitor *v, int arg) const
{
if (is_tex() && arg == 0 && src[0].file == GRF) {
if (v->dispatch_width == 16)
}
bool
-fs_inst::reads_flag()
+fs_inst::reads_flag() const
{
return predicate;
}
bool
-fs_inst::writes_flag()
+fs_inst::writes_flag() const
{
return (conditional_mod && opcode != BRW_OPCODE_SEL) ||
opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
return 2;
case SHADER_OPCODE_UNTYPED_ATOMIC:
case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+ case FS_OPCODE_INTERPOLATE_AT_CENTROID:
+ case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
+ case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
+ case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
return 0;
default:
- assert(!"not reached");
- return inst->mlen;
+ unreachable("not reached");
}
}
}
/** Fixed HW reg constructor. */
-fs_reg::fs_reg(enum register_file file, int reg, uint32_t type)
+fs_reg::fs_reg(enum register_file file, int reg, enum brw_reg_type type)
{
init();
this->file = file;
hash_table_call_foreach(v->variable_ht,
import_uniforms_callback,
variable_ht);
- this->params_remap = v->params_remap;
- this->nr_params_remap = v->nr_params_remap;
+ this->push_constant_loc = v->push_constant_loc;
+ this->pull_constant_loc = v->pull_constant_loc;
+ this->uniforms = v->uniforms;
+ this->param_size = v->param_size;
}
/* Our support for uniforms is piggy-backed on the struct
* order we'd walk the type, so walk the list of storage and find anything
* with our name, or the prefix of a component that starts with our name.
*/
- unsigned params_before = c->prog_data.nr_params;
+ unsigned params_before = uniforms;
for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
slots *= storage->array_elements;
for (unsigned i = 0; i < slots; i++) {
- c->prog_data.param[c->prog_data.nr_params++] =
- &storage->storage[i].f;
+ stage_prog_data->param[uniforms++] = &storage->storage[i].f;
}
}
/* Make sure we actually initialized the right amount of stuff here. */
- assert(params_before + ir->type->component_slots() ==
- c->prog_data.nr_params);
+ assert(params_before + ir->type->component_slots() == uniforms);
(void)params_before;
}
break;
last_swiz = swiz;
- c->prog_data.param[c->prog_data.nr_params++] =
+ stage_prog_data->param[uniforms++] =
&fp->Base.Parameters->ParameterValues[index][swiz].f;
}
}
{
fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
fs_reg wpos = *reg;
- bool flip = !ir->data.origin_upper_left ^ c->key.render_to_fbo;
+ bool flip = !ir->data.origin_upper_left ^ key->render_to_fbo;
/* gl_FragCoord.x */
if (ir->data.pixel_center_integer) {
if (flip) {
pixel_y.negate = true;
- offset += c->key.drawable_height - 1.0;
+ offset += key->drawable_height - 1.0;
}
emit(ADD(wpos, pixel_y, fs_reg(offset)));
/* gl_FragCoord.z */
if (brw->gen >= 6) {
- emit(MOV(wpos, fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
+ emit(MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0))));
} else {
emit(FS_OPCODE_LINTERP, wpos,
this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
}
glsl_interp_qualifier interpolation_mode =
- ir->determine_interpolation_mode(c->key.flat_shade);
+ ir->determine_interpolation_mode(key->flat_shade);
int location = ir->data.location;
for (unsigned int i = 0; i < array_elements; i++) {
for (unsigned int j = 0; j < type->matrix_columns; j++) {
- if (c->prog_data.urb_setup[location] == -1) {
+ if (prog_data->urb_setup[location] == -1) {
/* If there's no incoming setup data for this slot, don't
* emit interpolation for it.
*/
} else {
/* Smooth/noperspective interpolation case. */
for (unsigned int k = 0; k < type->vector_elements; k++) {
- /* FINISHME: At some point we probably want to push
- * this farther by giving similar treatment to the
- * other potentially constant components of the
- * attribute, as well as making brw_vs_constval.c
- * handle varyings other than gl_TexCoord.
- */
struct brw_reg interp = interp_reg(location, k);
- emit_linterp(attr, fs_reg(interp), interpolation_mode,
- ir->data.centroid && !c->key.persample_shading,
- ir->data.sample || c->key.persample_shading);
if (brw->needs_unlit_centroid_workaround && ir->data.centroid) {
/* Get the pixel/sample mask into f0 so that we know
* which pixels are lit. Then, for each channel that is
* data.
*/
emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
- fs_inst *inst = emit_linterp(attr, fs_reg(interp),
- interpolation_mode,
- false, false);
+
+ fs_inst *inst;
+ inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
+ false, false);
inst->predicate = BRW_PREDICATE_NORMAL;
inst->predicate_inverse = true;
+ if (brw->has_pln)
+ inst->no_dd_clear = true;
+
+ inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
+ ir->data.centroid && !key->persample_shading,
+ ir->data.sample || key->persample_shading);
+ inst->predicate = BRW_PREDICATE_NORMAL;
+ inst->predicate_inverse = false;
+ if (brw->has_pln)
+ inst->no_dd_check = true;
+
+ } else {
+ emit_linterp(attr, fs_reg(interp), interpolation_mode,
+ ir->data.centroid && !key->persample_shading,
+ ir->data.sample || key->persample_shading);
}
if (brw->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
{
assert(dst.type == BRW_REGISTER_TYPE_F);
- if (c->key.compute_pos_offset) {
+ if (key->compute_pos_offset) {
/* Convert int_sample_pos to floating point */
emit(MOV(dst, int_sample_pos));
/* Scale to the range [0, 1] */
* the positions using vstride=16, width=8, hstride=2.
*/
struct brw_reg sample_pos_reg =
- stride(retype(brw_vec1_grf(c->sample_pos_reg, 0),
+ stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
BRW_REGISTER_TYPE_B), 16, 8, 2);
- emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+ fs_inst *inst = emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
if (dispatch_width == 16) {
- int_sample_x.sechalf = true;
- fs_inst *inst = emit(MOV(int_sample_x,
- fs_reg(suboffset(sample_pos_reg, 16))));
+ inst->force_uncompressed = true;
+ inst = emit(MOV(half(int_sample_x, 1),
+ fs_reg(suboffset(sample_pos_reg, 16))));
inst->force_sechalf = true;
- int_sample_x.sechalf = false;
}
/* Compute gl_SamplePosition.x */
compute_sample_position(pos, int_sample_x);
pos.reg_offset++;
- emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+ inst = emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
if (dispatch_width == 16) {
- int_sample_y.sechalf = true;
- fs_inst *inst = emit(MOV(int_sample_y,
- fs_reg(suboffset(sample_pos_reg, 17))));
+ inst->force_uncompressed = true;
+ inst = emit(MOV(half(int_sample_y, 1),
+ fs_reg(suboffset(sample_pos_reg, 17))));
inst->force_sechalf = true;
- int_sample_y.sechalf = false;
}
/* Compute gl_SamplePosition.y */
compute_sample_position(pos, int_sample_y);
this->current_annotation = "compute sample id";
fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
- if (c->key.compute_sample_id) {
+ if (key->compute_sample_id) {
fs_reg t1 = fs_reg(this, glsl_type::int_type);
fs_reg t2 = fs_reg(this, glsl_type::int_type);
t2.type = BRW_REGISTER_TYPE_UW;
* populating a temporary variable with the sequence (0, 1, 2, 3),
* and then reading from it using vstride=1, width=4, hstride=0.
* These computations hold good for 4x multisampling as well.
+ *
+ * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
+ * the first four slots are sample 0 of subspan 0; the next four
+ * are sample 1 of subspan 0; the third group is sample 0 of
+ * subspan 1, and finally sample 1 of subspan 1.
*/
- emit(BRW_OPCODE_AND, t1,
- fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
- fs_reg(brw_imm_d(0xc0)));
- emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
+ fs_inst *inst;
+ inst = emit(BRW_OPCODE_AND, t1,
+ fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+ fs_reg(0xc0));
+ inst->force_writemask_all = true;
+ inst = emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
+ inst->force_writemask_all = true;
/* This works for both SIMD8 and SIMD16 */
- emit(MOV(t2, brw_imm_v(0x3210)));
+ inst = emit(MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210)));
+ inst->force_writemask_all = true;
/* This special instruction takes care of setting vstride=1,
* width=4, hstride=0 of t2 during an ADD instruction.
*/
return reg;
}
-fs_reg *
-fs_visitor::emit_samplemaskin_setup(ir_variable *ir)
-{
- assert(brw->gen >= 7);
- this->current_annotation = "compute gl_SampleMaskIn";
- fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
- emit(MOV(*reg, fs_reg(retype(brw_vec8_grf(c->sample_mask_reg, 0), BRW_REGISTER_TYPE_D))));
- return reg;
-}
-
fs_reg
fs_visitor::fix_math_operand(fs_reg src)
{
case SHADER_OPCODE_COS:
break;
default:
- assert(!"not reached: bad math opcode");
- return NULL;
+ unreachable("not reached: bad math opcode");
}
/* Can't do hstride == 0 args to gen6 math, so expand it out. We
* Gen 6 hardware ignores source modifiers (negate and abs) on math
* instructions, so we also move to a temp to set those up.
*/
- if (brw->gen >= 6)
+ if (brw->gen == 6 || brw->gen == 7)
src = fix_math_operand(src);
fs_inst *inst = emit(opcode, dst, src);
switch (opcode) {
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 INTDIV unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 INTDIV unsupported\n");
break;
case SHADER_OPCODE_POW:
break;
default:
- assert(!"not reached: unsupported binary math opcode.");
- return NULL;
+ unreachable("not reached: unsupported binary math opcode.");
}
- if (brw->gen >= 6) {
+ if (brw->gen >= 8) {
+ inst = emit(opcode, dst, src0, src1);
+ } else if (brw->gen >= 6) {
src0 = fix_math_operand(src0);
src1 = fix_math_operand(src1);
void
fs_visitor::assign_curb_setup()
{
- c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
if (dispatch_width == 8) {
- c->prog_data.first_curbe_grf = c->nr_payload_regs;
+ prog_data->base.dispatch_grf_start_reg = payload.num_regs;
} else {
- c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
+ prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
}
- /* Map the offsets in the UNIFORM file to fixed HW regs. */
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
+ prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;
- for (unsigned int i = 0; i < 3; i++) {
+ /* Map the offsets in the UNIFORM file to fixed HW regs. */
+ foreach_in_list(fs_inst, inst, &instructions) {
+ for (unsigned int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == UNIFORM) {
- int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
- struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
+ int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
+ int constant_nr;
+ if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
+ constant_nr = push_constant_loc[uniform_nr];
+ } else {
+ /* Section 5.11 of the OpenGL 4.1 spec says:
+ * "Out-of-bounds reads return undefined values, which include
+ * values from other variables of the active program or zero."
+ * Just return the first push constant.
+ */
+ constant_nr = 0;
+ }
+
+ struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
constant_nr / 8,
constant_nr % 8);
fs_visitor::calculate_urb_setup()
{
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- c->prog_data.urb_setup[i] = -1;
+ prog_data->urb_setup[i] = -1;
}
int urb_next = 0;
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
if (fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(i)) {
- c->prog_data.urb_setup[i] = urb_next++;
+ prog_data->urb_setup[i] = urb_next++;
}
}
} else {
*/
struct brw_vue_map prev_stage_vue_map;
brw_compute_vue_map(brw, &prev_stage_vue_map,
- c->key.input_slots_valid);
+ key->input_slots_valid);
int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
assert(prev_stage_vue_map.num_slots <= first_slot + 32);
for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
if (varying != BRW_VARYING_SLOT_COUNT &&
(fp->Base.InputsRead & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(varying))) {
- c->prog_data.urb_setup[varying] = slot - first_slot;
+ prog_data->urb_setup[varying] = slot - first_slot;
}
}
urb_next = prev_stage_vue_map.num_slots - first_slot;
if (i == VARYING_SLOT_PSIZ)
continue;
- if (c->key.input_slots_valid & BITFIELD64_BIT(i)) {
+ if (key->input_slots_valid & BITFIELD64_BIT(i)) {
/* The back color slot is skipped when the front color is
* also written to. In addition, some slots can be
* written in the vertex shader and not read in the
* incremented, mapped or not.
*/
if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
- c->prog_data.urb_setup[i] = urb_next;
+ prog_data->urb_setup[i] = urb_next;
urb_next++;
}
}
* See compile_sf_prog() for more info.
*/
if (fp->Base.InputsRead & BITFIELD64_BIT(VARYING_SLOT_PNTC))
- c->prog_data.urb_setup[VARYING_SLOT_PNTC] = urb_next++;
+ prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
- c->prog_data.num_varying_inputs = urb_next;
+ prog_data->num_varying_inputs = urb_next;
}
void
fs_visitor::assign_urb_setup()
{
- int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;
+ int urb_start = payload.num_regs + prog_data->curb_read_length;
/* Offset all the urb_setup[] index by the actual position of the
* setup regs, now that the location of the constants has been chosen.
*/
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
if (inst->opcode == FS_OPCODE_LINTERP) {
assert(inst->src[2].file == HW_REG);
inst->src[2].fixed_hw_reg.nr += urb_start;
/* Each attribute is 4 setup channels, each of which is half a reg. */
this->first_non_payload_grf =
- urb_start + c->prog_data.num_varying_inputs * 2;
+ urb_start + prog_data->num_varying_inputs * 2;
}
/**
false;
}
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
/* If there's a SEND message that requires contiguous destination
* registers, no splitting is allowed.
*/
* the send is reading the whole thing.
*/
if (inst->is_send_from_grf()) {
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == GRF) {
split_grf[inst->src[i].reg] = false;
}
}
}
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
if (inst->dst.file == GRF &&
split_grf[inst->dst.reg] &&
inst->dst.reg_offset != 0) {
inst->dst.reg_offset - 1);
inst->dst.reg_offset = 0;
}
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == GRF &&
split_grf[inst->src[i].reg] &&
inst->src[i].reg_offset != 0) {
void
fs_visitor::compact_virtual_grfs()
{
+ if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER))
+ return;
+
/* Mark which virtual GRFs are used, and count how many. */
int remap_table[this->virtual_grf_count];
memset(remap_table, -1, sizeof(remap_table));
- foreach_list(node, &this->instructions) {
- const fs_inst *inst = (const fs_inst *) node;
-
+ foreach_in_list(const fs_inst, inst, &instructions) {
if (inst->dst.file == GRF)
remap_table[inst->dst.reg] = 0;
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == GRF)
remap_table[inst->src[i].reg] = 0;
}
}
- /* In addition to registers used in instructions, fs_visitor keeps
- * direct references to certain special values which must be patched:
- */
- fs_reg *special[] = {
- &frag_depth, &pixel_x, &pixel_y, &pixel_w, &wpos_w, &dual_src_output,
- &outputs[0], &outputs[1], &outputs[2], &outputs[3],
- &outputs[4], &outputs[5], &outputs[6], &outputs[7],
- &delta_x[0], &delta_x[1], &delta_x[2],
- &delta_x[3], &delta_x[4], &delta_x[5],
- &delta_y[0], &delta_y[1], &delta_y[2],
- &delta_y[3], &delta_y[4], &delta_y[5],
- };
- STATIC_ASSERT(BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT == 6);
- STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS == 8);
-
- /* Treat all special values as used, to be conservative */
- for (unsigned i = 0; i < ARRAY_SIZE(special); i++) {
- if (special[i]->file == GRF)
- remap_table[special[i]->reg] = 0;
- }
-
/* Compact the GRF arrays. */
int new_index = 0;
for (int i = 0; i < this->virtual_grf_count; i++) {
this->virtual_grf_count = new_index;
/* Patch all the instructions to use the newly renumbered registers */
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *) node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
if (inst->dst.file == GRF)
inst->dst.reg = remap_table[inst->dst.reg];
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == GRF)
inst->src[i].reg = remap_table[inst->src[i].reg];
}
}
-
- /* Patch all the references to special values */
- for (unsigned i = 0; i < ARRAY_SIZE(special); i++) {
- if (special[i]->file == GRF && remap_table[special[i]->reg] != -1)
- special[i]->reg = remap_table[special[i]->reg];
- }
-}
-
-bool
-fs_visitor::remove_dead_constants()
-{
- if (dispatch_width == 8) {
- this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);
- this->nr_params_remap = c->prog_data.nr_params;
-
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
- this->params_remap[i] = -1;
-
- /* Find which params are still in use. */
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- for (int i = 0; i < 3; i++) {
- int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
-
- if (inst->src[i].file != UNIFORM)
- continue;
-
- /* Section 5.11 of the OpenGL 4.3 spec says:
- *
- * "Out-of-bounds reads return undefined values, which include
- * values from other variables of the active program or zero."
- */
- if (constant_nr < 0 || constant_nr >= (int)c->prog_data.nr_params) {
- constant_nr = 0;
- }
-
- /* For now, set this to non-negative. We'll give it the
- * actual new number in a moment, in order to keep the
- * register numbers nicely ordered.
- */
- this->params_remap[constant_nr] = 0;
- }
- }
-
- /* Figure out what the new numbers for the params will be. At some
- * point when we're doing uniform array access, we're going to want
- * to keep the distinction between .reg and .reg_offset, but for
- * now we don't care.
- */
- unsigned int new_nr_params = 0;
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
- if (this->params_remap[i] != -1) {
- this->params_remap[i] = new_nr_params++;
- }
- }
-
- /* Update the list of params to be uploaded to match our new numbering. */
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
- int remapped = this->params_remap[i];
-
- if (remapped == -1)
- continue;
-
- c->prog_data.param[remapped] = c->prog_data.param[i];
- }
-
- c->prog_data.nr_params = new_nr_params;
- } else {
- /* This should have been generated in the SIMD8 pass already. */
- assert(this->params_remap);
- }
-
- /* Now do the renumbering of the shader to remove unused params. */
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- for (int i = 0; i < 3; i++) {
- int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
-
- if (inst->src[i].file != UNIFORM)
- continue;
-
- /* as above alias to 0 */
- if (constant_nr < 0 || constant_nr >= (int)this->nr_params_remap) {
- constant_nr = 0;
- }
- assert(this->params_remap[constant_nr] != -1);
- inst->src[i].reg = this->params_remap[constant_nr];
- inst->src[i].reg_offset = 0;
- }
- }
-
- return true;
}
/*
void
fs_visitor::move_uniform_array_access_to_pull_constants()
{
- int pull_constant_loc[c->prog_data.nr_params];
+ if (dispatch_width != 8)
+ return;
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
+ pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+
+ for (unsigned int i = 0; i < uniforms; i++) {
pull_constant_loc[i] = -1;
}
* Note that we don't move constant-indexed accesses to arrays. No
* testing has been done of the performance impact of this choice.
*/
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- for (int i = 0 ; i < 3; i++) {
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
+ for (int i = 0 ; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
continue;
* add it.
*/
if (pull_constant_loc[uniform] == -1) {
- const float **values = &c->prog_data.param[uniform];
-
- pull_constant_loc[uniform] = c->prog_data.nr_pull_params;
+ const float **values = &stage_prog_data->param[uniform];
assert(param_size[uniform]);
for (int j = 0; j < param_size[uniform]; j++) {
- c->prog_data.pull_param[c->prog_data.nr_pull_params++] =
+ pull_constant_loc[uniform + j] = stage_prog_data->nr_pull_params;
+
+ stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
values[j];
}
}
-
- /* Set up the annotation tracking for new generated instructions. */
- base_ir = inst->ir;
- current_annotation = inst->annotation;
-
- fs_reg surf_index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
- fs_reg temp = fs_reg(this, glsl_type::float_type);
- exec_list list = VARYING_PULL_CONSTANT_LOAD(temp,
- surf_index,
- *inst->src[i].reladdr,
- pull_constant_loc[uniform] +
- inst->src[i].reg_offset);
- inst->insert_before(&list);
-
- inst->src[i].file = temp.file;
- inst->src[i].reg = temp.reg;
- inst->src[i].reg_offset = temp.reg_offset;
- inst->src[i].reladdr = NULL;
}
}
}
/**
- * Choose accesses from the UNIFORM file to demote to using the pull
- * constant buffer.
+ * Assign UNIFORM file registers to either push constants or pull constants.
*
* We allow a fragment shader to have more than the specified minimum
* maximum number of fragment shader uniform components (64). If
* update the program to load them.
*/
void
-fs_visitor::setup_pull_constants()
+fs_visitor::assign_constant_locations()
{
- /* Only allow 16 registers (128 uniform components) as push constants. */
- unsigned int max_uniform_components = 16 * 8;
- if (c->prog_data.nr_params <= max_uniform_components)
+ /* Only the first compile (SIMD8 mode) gets to decide on locations. */
+ if (dispatch_width != 8)
return;
- if (dispatch_width == 16) {
- fail("Pull constants not supported in SIMD16\n");
- return;
+ /* Find which UNIFORM registers are still in use. */
+ bool is_live[uniforms];
+ for (unsigned int i = 0; i < uniforms; i++) {
+ is_live[i] = false;
}
- /* Just demote the end of the list. We could probably do better
+ foreach_in_list(fs_inst, inst, &instructions) {
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file != UNIFORM)
+ continue;
+
+ int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
+ if (constant_nr >= 0 && constant_nr < (int) uniforms)
+ is_live[constant_nr] = true;
+ }
+ }
+
+ /* Only allow 16 registers (128 uniform components) as push constants.
+ *
+ * Just demote the end of the list. We could probably do better
* here, demoting things that are rarely used in the program first.
+ *
+ * If changing this value, note the limitation about total_regs in
+ * brw_curbe.c.
*/
- unsigned int pull_uniform_base = max_uniform_components;
+ unsigned int max_push_components = 16 * 8;
+ unsigned int num_push_constants = 0;
- int pull_constant_loc[c->prog_data.nr_params];
- for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
- if (i < pull_uniform_base) {
- pull_constant_loc[i] = -1;
- } else {
- pull_constant_loc[i] = -1;
- /* If our constant is already being uploaded for reladdr purposes,
- * reuse it.
+ push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+
+ for (unsigned int i = 0; i < uniforms; i++) {
+ if (!is_live[i] || pull_constant_loc[i] != -1) {
+ /* This UNIFORM register is either dead, or has already been demoted
+ * to a pull const. Mark it as no longer living in the param[] array.
*/
- for (unsigned int j = 0; j < c->prog_data.nr_pull_params; j++) {
- if (c->prog_data.pull_param[j] == c->prog_data.param[i]) {
- pull_constant_loc[i] = j;
- break;
- }
- }
- if (pull_constant_loc[i] == -1) {
- int pull_index = c->prog_data.nr_pull_params++;
- c->prog_data.pull_param[pull_index] = c->prog_data.param[i];
- pull_constant_loc[i] = pull_index;;
- }
+ push_constant_loc[i] = -1;
+ continue;
+ }
+
+ if (num_push_constants < max_push_components) {
+ /* Retain as a push constant. Record the location in the params[]
+ * array.
+ */
+ push_constant_loc[i] = num_push_constants++;
+ } else {
+ /* Demote to a pull constant. */
+ push_constant_loc[i] = -1;
+
+ int pull_index = stage_prog_data->nr_pull_params++;
+ stage_prog_data->pull_param[pull_index] = stage_prog_data->param[i];
+ pull_constant_loc[i] = pull_index;
}
}
- c->prog_data.nr_params = pull_uniform_base;
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
+ stage_prog_data->nr_params = num_push_constants;
+
+ /* Up until now, the param[] array has been indexed by reg + reg_offset
+ * of UNIFORM registers. Condense it to only contain the uniforms we
+ * chose to upload as push constants.
+ */
+ for (unsigned int i = 0; i < uniforms; i++) {
+ int remapped = push_constant_loc[i];
+
+ if (remapped == -1)
+ continue;
+
+ assert(remapped <= (int)i);
+ stage_prog_data->param[remapped] = stage_prog_data->param[i];
+ }
+}
- for (int i = 0; i < 3; i++) {
+/**
+ * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
+ * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
+ */
+void
+fs_visitor::demote_pull_constants()
+{
+ foreach_in_list(fs_inst, inst, &instructions) {
+ for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM)
continue;
if (pull_index == -1)
continue;
- assert(!inst->src[i].reladdr);
-
- fs_reg dst = fs_reg(this, glsl_type::float_type);
- fs_reg index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
- fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
- fs_inst *pull =
- new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
- dst, index, offset);
- pull->ir = inst->ir;
- pull->annotation = inst->annotation;
+ /* Set up the annotation tracking for new generated instructions. */
+ base_ir = inst->ir;
+ current_annotation = inst->annotation;
- inst->insert_before(pull);
+ fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
+ fs_reg dst = fs_reg(this, glsl_type::float_type);
+
+ /* Generate a pull load into dst. */
+ if (inst->src[i].reladdr) {
+ exec_list list = VARYING_PULL_CONSTANT_LOAD(dst,
+ surf_index,
+ *inst->src[i].reladdr,
+ pull_index);
+ inst->insert_before(&list);
+ inst->src[i].reladdr = NULL;
+ } else {
+ fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
+ fs_inst *pull =
+ new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ dst, surf_index, offset);
+ inst->insert_before(pull);
+ inst->src[i].set_smear(pull_index & 3);
+ }
- inst->src[i].file = GRF;
- inst->src[i].reg = dst.reg;
- inst->src[i].reg_offset = 0;
- inst->src[i].smear = pull_index & 3;
+ /* Rewrite the instruction to use the temporary VGRF. */
+ inst->src[i].file = GRF;
+ inst->src[i].reg = dst.reg;
+ inst->src[i].reg_offset = 0;
}
}
+ invalidate_live_intervals();
}
bool
{
bool progress = false;
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
switch (inst->opcode) {
case BRW_OPCODE_MUL:
if (inst->src[1].file != IMM)
}
break;
case BRW_OPCODE_SEL:
- if (inst->saturate && inst->src[1].file == IMM) {
+ if (inst->src[0].equals(inst->src[1])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[1] = reg_undef;
+ inst->predicate = BRW_PREDICATE_NONE;
+ inst->predicate_inverse = false;
+ progress = true;
+ } else if (inst->saturate && inst->src[1].file == IMM) {
switch (inst->conditional_mod) {
case BRW_CONDITIONAL_LE:
case BRW_CONDITIONAL_L:
switch (inst->src[1].type) {
case BRW_REGISTER_TYPE_F:
- if (inst->src[1].imm.f >= 1.0f) {
+ if (inst->src[1].fixed_hw_reg.dw1.f >= 1.0f) {
inst->opcode = BRW_OPCODE_MOV;
inst->src[1] = reg_undef;
progress = true;
case BRW_CONDITIONAL_G:
switch (inst->src[1].type) {
case BRW_REGISTER_TYPE_F:
- if (inst->src[1].imm.f <= 0.0f) {
+ if (inst->src[1].fixed_hw_reg.dw1.f <= 0.0f) {
inst->opcode = BRW_OPCODE_MOV;
inst->src[1] = reg_undef;
inst->conditional_mod = BRW_CONDITIONAL_NONE;
return progress;
}
-/**
- * Removes any instructions writing a VGRF where that VGRF is not used by any
- * later instruction.
- */
-bool
-fs_visitor::dead_code_eliminate()
-{
- bool progress = false;
- int pc = 0;
-
- calculate_live_intervals();
-
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- if (inst->dst.file == GRF && !inst->has_side_effects()) {
- bool dead = true;
-
- for (int i = 0; i < inst->regs_written; i++) {
- int var = live_intervals->var_from_vgrf[inst->dst.reg];
- assert(live_intervals->end[var + inst->dst.reg_offset + i] >= pc);
- if (live_intervals->end[var + inst->dst.reg_offset + i] != pc) {
- dead = false;
- break;
- }
- }
-
- if (dead) {
- /* Don't dead code eliminate instructions that write to the
- * accumulator as a side-effect. Instead just set the destination
- * to the null register to free it.
- */
- switch (inst->opcode) {
- case BRW_OPCODE_ADDC:
- case BRW_OPCODE_SUBB:
- case BRW_OPCODE_MACH:
- inst->dst = fs_reg(retype(brw_null_reg(), inst->dst.type));
- break;
- default:
- inst->remove();
- progress = true;
- break;
- }
- }
- }
-
- pc++;
- }
-
- if (progress)
- invalidate_live_intervals();
-
- return progress;
-}
-
-struct dead_code_hash_key
-{
- int vgrf;
- int reg_offset;
-};
-
-static bool
-dead_code_hash_compare(const void *a, const void *b)
-{
- return memcmp(a, b, sizeof(struct dead_code_hash_key)) == 0;
-}
-
-static void
-clear_dead_code_hash(struct hash_table *ht)
-{
- struct hash_entry *entry;
-
- hash_table_foreach(ht, entry) {
- _mesa_hash_table_remove(ht, entry);
- }
-}
-
-static void
-insert_dead_code_hash(struct hash_table *ht,
- int vgrf, int reg_offset, fs_inst *inst)
-{
- /* We don't bother freeing keys, because they'll be GCed with the ht. */
- struct dead_code_hash_key *key = ralloc(ht, struct dead_code_hash_key);
-
- key->vgrf = vgrf;
- key->reg_offset = reg_offset;
-
- _mesa_hash_table_insert(ht, _mesa_hash_data(key, sizeof(*key)), key, inst);
-}
-
-static struct hash_entry *
-get_dead_code_hash_entry(struct hash_table *ht, int vgrf, int reg_offset)
-{
- struct dead_code_hash_key key;
-
- key.vgrf = vgrf;
- key.reg_offset = reg_offset;
-
- return _mesa_hash_table_search(ht, _mesa_hash_data(&key, sizeof(key)), &key);
-}
-
-static void
-remove_dead_code_hash(struct hash_table *ht,
- int vgrf, int reg_offset)
-{
- struct hash_entry *entry = get_dead_code_hash_entry(ht, vgrf, reg_offset);
- if (!entry)
- return;
-
- _mesa_hash_table_remove(ht, entry);
-}
-
-/**
- * Walks basic blocks, removing any regs that are written but not read before
- * being redefined.
- *
- * The dead_code_eliminate() function implements a global dead code
- * elimination, but it only handles the removing the last write to a register
- * if it's never read. This one can handle intermediate writes, but only
- * within a basic block.
- */
-bool
-fs_visitor::dead_code_eliminate_local()
-{
- struct hash_table *ht;
- bool progress = false;
-
- ht = _mesa_hash_table_create(mem_ctx, dead_code_hash_compare);
-
- if (ht == NULL) {
- return false;
- }
-
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- /* At a basic block, empty the HT since we don't understand dataflow
- * here.
- */
- if (inst->is_control_flow()) {
- clear_dead_code_hash(ht);
- continue;
- }
-
- /* Clear the HT of any instructions that got read. */
- for (int i = 0; i < 3; i++) {
- fs_reg src = inst->src[i];
- if (src.file != GRF)
- continue;
-
- int read = 1;
- if (inst->is_send_from_grf())
- read = virtual_grf_sizes[src.reg] - src.reg_offset;
-
- for (int reg_offset = src.reg_offset;
- reg_offset < src.reg_offset + read;
- reg_offset++) {
- remove_dead_code_hash(ht, src.reg, reg_offset);
- }
- }
-
- /* Add any update of a GRF to the HT, removing a previous write if it
- * wasn't read.
- */
- if (inst->dst.file == GRF) {
- if (inst->regs_written > 1) {
- /* We don't know how to trim channels from an instruction's
- * writes, so we can't incrementally remove unread channels from
- * it. Just remove whatever it overwrites from the table
- */
- for (int i = 0; i < inst->regs_written; i++) {
- remove_dead_code_hash(ht,
- inst->dst.reg,
- inst->dst.reg_offset + i);
- }
- } else {
- struct hash_entry *entry =
- get_dead_code_hash_entry(ht, inst->dst.reg,
- inst->dst.reg_offset);
-
- if (entry) {
- if (inst->is_partial_write()) {
- /* For a partial write, we can't remove any previous dead code
- * candidate, since we're just modifying their result.
- */
- } else {
- /* We're completely updating a channel, and there was a
- * previous write to the channel that wasn't read. Kill it!
- */
- fs_inst *inst = (fs_inst *)entry->data;
- inst->remove();
- progress = true;
- }
-
- _mesa_hash_table_remove(ht, entry);
- }
-
- if (!inst->has_side_effects())
- insert_dead_code_hash(ht, inst->dst.reg, inst->dst.reg_offset,
- inst);
- }
- }
- }
-
- _mesa_hash_table_destroy(ht, NULL);
-
- if (progress)
- invalidate_live_intervals();
-
- return progress;
-}
-
-/**
- * Implements register coalescing: Checks if the two registers involved in a
- * raw move don't interfere, in which case they can both be stored in the same
- * place and the MOV removed.
- *
- * To do this, all uses of the source of the MOV in the shader are replaced
- * with the destination of the MOV. For example:
- *
- * add vgrf3:F, vgrf1:F, vgrf2:F
- * mov vgrf4:F, vgrf3:F
- * mul vgrf5:F, vgrf5:F, vgrf4:F
- *
- * becomes
- *
- * add vgrf4:F, vgrf1:F, vgrf2:F
- * mul vgrf5:F, vgrf5:F, vgrf4:F
- */
-bool
-fs_visitor::register_coalesce()
-{
- bool progress = false;
-
- calculate_live_intervals();
-
- int src_size = 0;
- int channels_remaining = 0;
- int reg_from = -1, reg_to = -1;
- int reg_to_offset[MAX_SAMPLER_MESSAGE_SIZE];
- fs_inst *mov[MAX_SAMPLER_MESSAGE_SIZE];
-
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- if (inst->opcode != BRW_OPCODE_MOV ||
- inst->is_partial_write() ||
- inst->saturate ||
- inst->src[0].file != GRF ||
- inst->src[0].negate ||
- inst->src[0].abs ||
- inst->src[0].smear != -1 ||
- !inst->src[0].is_contiguous() ||
- inst->dst.file != GRF ||
- inst->dst.type != inst->src[0].type) {
- continue;
- }
-
- if (virtual_grf_sizes[inst->src[0].reg] >
- virtual_grf_sizes[inst->dst.reg])
- continue;
-
- int var_from = live_intervals->var_from_reg(&inst->src[0]);
- int var_to = live_intervals->var_from_reg(&inst->dst);
-
- if (live_intervals->vars_interfere(var_from, var_to) &&
- !inst->dst.equals(inst->src[0])) {
-
- /* We know that the live ranges of A (var_from) and B (var_to)
- * interfere because of the ->vars_interfere() call above. If the end
- * of B's live range is after the end of A's range, then we know two
- * things:
- * - the start of B's live range must be in A's live range (since we
- * already know the two ranges interfere, this is the only remaining
- * possibility)
- * - the interference isn't of the form we're looking for (where B is
- * entirely inside A)
- */
- if (live_intervals->end[var_to] > live_intervals->end[var_from])
- continue;
-
- bool overwritten = false;
- int scan_ip = -1;
-
- foreach_list(n, &this->instructions) {
- fs_inst *scan_inst = (fs_inst *)n;
- scan_ip++;
-
- if (scan_inst->is_control_flow()) {
- overwritten = true;
- break;
- }
-
- if (scan_ip <= live_intervals->start[var_to])
- continue;
-
- if (scan_ip > live_intervals->end[var_to])
- break;
-
- if (scan_inst->dst.equals(inst->dst) ||
- scan_inst->dst.equals(inst->src[0])) {
- overwritten = true;
- break;
- }
- }
-
- if (overwritten)
- continue;
- }
-
- if (reg_from != inst->src[0].reg) {
- reg_from = inst->src[0].reg;
-
- src_size = virtual_grf_sizes[inst->src[0].reg];
- assert(src_size <= MAX_SAMPLER_MESSAGE_SIZE);
-
- channels_remaining = src_size;
- memset(mov, 0, sizeof(mov));
-
- reg_to = inst->dst.reg;
- }
-
- if (reg_to != inst->dst.reg)
- continue;
-
- const int offset = inst->src[0].reg_offset;
- reg_to_offset[offset] = inst->dst.reg_offset;
- mov[offset] = inst;
- channels_remaining--;
-
- if (channels_remaining)
- continue;
-
- bool removed = false;
- for (int i = 0; i < src_size; i++) {
- if (mov[i]) {
- removed = true;
-
- mov[i]->opcode = BRW_OPCODE_NOP;
- mov[i]->conditional_mod = BRW_CONDITIONAL_NONE;
- mov[i]->dst = reg_undef;
- mov[i]->src[0] = reg_undef;
- mov[i]->src[1] = reg_undef;
- mov[i]->src[2] = reg_undef;
- }
- }
-
- foreach_list(node, &this->instructions) {
- fs_inst *scan_inst = (fs_inst *)node;
-
- for (int i = 0; i < src_size; i++) {
- if (mov[i]) {
- if (scan_inst->dst.file == GRF &&
- scan_inst->dst.reg == reg_from &&
- scan_inst->dst.reg_offset == i) {
- scan_inst->dst.reg = reg_to;
- scan_inst->dst.reg_offset = reg_to_offset[i];
- }
- for (int j = 0; j < 3; j++) {
- if (scan_inst->src[j].file == GRF &&
- scan_inst->src[j].reg == reg_from &&
- scan_inst->src[j].reg_offset == i) {
- scan_inst->src[j].reg = reg_to;
- scan_inst->src[j].reg_offset = reg_to_offset[i];
- }
- }
- }
- }
- }
-
- if (removed) {
- live_intervals->start[var_to] = MIN2(live_intervals->start[var_to],
- live_intervals->start[var_from]);
- live_intervals->end[var_to] = MAX2(live_intervals->end[var_to],
- live_intervals->end[var_from]);
- reg_from = -1;
- }
- }
-
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
- if (inst->opcode == BRW_OPCODE_NOP) {
- inst->remove();
- progress = true;
- }
- }
-
- if (progress)
- invalidate_live_intervals();
-
- return progress;
-}
-
bool
fs_visitor::compute_to_mrf()
{
calculate_live_intervals();
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
int ip = next_ip;
next_ip++;
inst->dst.file != MRF || inst->src[0].file != GRF ||
inst->dst.type != inst->src[0].type ||
inst->src[0].abs || inst->src[0].negate ||
- inst->src[0].smear != -1 || !inst->src[0].is_contiguous() ||
+ !inst->src[0].is_contiguous() ||
inst->src[0].subreg_offset)
continue;
*/
fs_inst *scan_inst;
for (scan_inst = (fs_inst *)inst->prev;
- scan_inst->prev != NULL;
+ !scan_inst->is_head_sentinel();
scan_inst = (fs_inst *)scan_inst->prev) {
if (scan_inst->dst.file == GRF &&
scan_inst->dst.reg == inst->src[0].reg) {
* MRF's source GRF that we wanted to rewrite, that stops us.
*/
bool interfered = false;
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < scan_inst->sources; i++) {
if (scan_inst->src[i].file == GRF &&
scan_inst->src[i].reg == inst->src[0].reg &&
scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
memset(last_mrf_move, 0, sizeof(last_mrf_move));
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->is_control_flow()) {
memset(last_mrf_move, 0, sizeof(last_mrf_move));
}
!inst->force_sechalf);
/* Clear the flag for registers that actually got read (as expected). */
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < inst->sources; i++) {
int grf;
if (inst->src[i].file == GRF) {
grf = inst->src[i].reg;
* program.
*/
for (fs_inst *scan_inst = (fs_inst *)inst->prev;
- scan_inst != NULL;
+ !scan_inst->is_head_sentinel();
scan_inst = (fs_inst *)scan_inst->prev) {
/* If we hit control flow, assume that there *are* outstanding
if (brw->gen != 4 || brw->is_g4x)
return;
+ bool progress = false;
+
/* Note that we're done with register allocation, so GRF fs_regs always
* have a .reg_offset of 0.
*/
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->mlen != 0 && inst->dst.file == GRF) {
insert_gen4_pre_send_dependency_workarounds(inst);
insert_gen4_post_send_dependency_workarounds(inst);
+ progress = true;
}
}
+
+ if (progress)
+ invalidate_live_intervals();
}
/**
void
fs_visitor::lower_uniform_pull_constant_loads()
{
- foreach_list(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list(fs_inst, inst, &instructions) {
if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
continue;
fs_reg const_offset_reg = inst->src[1];
assert(const_offset_reg.file == IMM &&
const_offset_reg.type == BRW_REGISTER_TYPE_UD);
- const_offset_reg.imm.u /= 4;
+ const_offset_reg.fixed_hw_reg.dw1.ud /= 4;
fs_reg payload = fs_reg(this, glsl_type::uint_type);
/* This is actually going to be a MOV, but since only the first dword
}
}
+/**
+ * Lower SHADER_OPCODE_LOAD_PAYLOAD into a series of per-register MOVs.
+ *
+ * Each source is copied into consecutive reg_offsets of the destination,
+ * after which the LOAD_PAYLOAD instruction itself is removed.  Returns
+ * true (and invalidates live intervals) if anything was lowered.
+ */
+bool
+fs_visitor::lower_load_payload()
+{
+ bool progress = false;
+
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
+ if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
+ fs_reg dst = inst->dst;
+
+ /* src[0] represents the (optional) message header. */
+ if (inst->src[0].file != BAD_FILE) {
+ inst->insert_before(MOV(dst, inst->src[0]));
+ }
+ /* Offset 0 is reserved for the header: advance even when src[0]
+ * is BAD_FILE, so payload sources always start at reg_offset 1.
+ */
+ dst.reg_offset++;
+
+ for (int i = 1; i < inst->sources; i++) {
+ inst->insert_before(MOV(dst, inst->src[i]));
+ dst.reg_offset++;
+ }
+
+ inst->remove();
+ progress = true;
+ }
+ }
+
+ if (progress)
+ invalidate_live_intervals();
+
+ return progress;
+}
+
void
fs_visitor::dump_instructions()
+{
+ dump_instructions(NULL);
+}
+
+/**
+ * Dump the instruction list (with register-pressure annotations) to the
+ * file named @name, or to stderr when @name is NULL or the file cannot
+ * be opened.
+ */
+void
+fs_visitor::dump_instructions(const char *name)
{
calculate_register_pressure();
+ FILE *file = stderr;
+ if (name) {
+ file = fopen(name, "w");
+ /* Fall back to stderr rather than losing the dump. */
+ if (!file)
+ file = stderr;
+ }
int ip = 0, max_pressure = 0;
- foreach_list(node, &this->instructions) {
- backend_instruction *inst = (backend_instruction *)node;
+ foreach_in_list(backend_instruction, inst, &instructions) {
max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
- printf("{%3d} %4d: ", regs_live_at_ip[ip], ip);
- dump_instruction(inst);
+ fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
+ dump_instruction(inst, file);
++ip;
}
- printf("Maximum %3d registers live at once.\n", max_pressure);
+ fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
+
+ if (file != stderr) {
+ fclose(file);
+ }
}
void
fs_visitor::dump_instruction(backend_instruction *be_inst)
+{
+ dump_instruction(be_inst, stderr);
+}
+
+void
+fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
fs_inst *inst = (fs_inst *)be_inst;
if (inst->predicate) {
- printf("(%cf0.%d) ",
+ fprintf(file, "(%cf0.%d) ",
inst->predicate_inverse ? '-' : '+',
inst->flag_subreg);
}
- printf("%s", brw_instruction_name(inst->opcode));
+ fprintf(file, "%s", brw_instruction_name(inst->opcode));
if (inst->saturate)
- printf(".sat");
+ fprintf(file, ".sat");
if (inst->conditional_mod) {
- printf("%s", conditional_modifier[inst->conditional_mod]);
+ fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
if (!inst->predicate &&
(brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
- printf(".f0.%d", inst->flag_subreg);
+ fprintf(file, ".f0.%d", inst->flag_subreg);
}
}
- printf(" ");
+ fprintf(file, " ");
switch (inst->dst.file) {
case GRF:
- printf("vgrf%d", inst->dst.reg);
+ fprintf(file, "vgrf%d", inst->dst.reg);
if (virtual_grf_sizes[inst->dst.reg] != 1 ||
inst->dst.subreg_offset)
- printf("+%d.%d", inst->dst.reg_offset, inst->dst.subreg_offset);
+ fprintf(file, "+%d.%d",
+ inst->dst.reg_offset, inst->dst.subreg_offset);
break;
case MRF:
- printf("m%d", inst->dst.reg);
+ fprintf(file, "m%d", inst->dst.reg);
break;
case BAD_FILE:
- printf("(null)");
+ fprintf(file, "(null)");
break;
case UNIFORM:
- printf("***u%d***", inst->dst.reg);
+ fprintf(file, "***u%d***", inst->dst.reg + inst->dst.reg_offset);
break;
case HW_REG:
if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
switch (inst->dst.fixed_hw_reg.nr) {
case BRW_ARF_NULL:
- printf("null");
+ fprintf(file, "null");
break;
case BRW_ARF_ADDRESS:
- printf("a0.%d", inst->dst.fixed_hw_reg.subnr);
+ fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
break;
case BRW_ARF_ACCUMULATOR:
- printf("acc%d", inst->dst.fixed_hw_reg.subnr);
+ fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
break;
case BRW_ARF_FLAG:
- printf("f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
inst->dst.fixed_hw_reg.subnr);
break;
default:
- printf("arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
+ fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
inst->dst.fixed_hw_reg.subnr);
break;
}
} else {
- printf("hw_reg%d", inst->dst.fixed_hw_reg.nr);
+ fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
}
if (inst->dst.fixed_hw_reg.subnr)
- printf("+%d", inst->dst.fixed_hw_reg.subnr);
+ fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
break;
default:
- printf("???");
+ fprintf(file, "???");
break;
}
- printf(":%s, ", reg_encoding[inst->dst.type]);
+ fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
- for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
+ for (int i = 0; i < inst->sources && inst->src[i].file != BAD_FILE; i++) {
if (inst->src[i].negate)
- printf("-");
+ fprintf(file, "-");
if (inst->src[i].abs)
- printf("|");
+ fprintf(file, "|");
switch (inst->src[i].file) {
case GRF:
- printf("vgrf%d", inst->src[i].reg);
+ fprintf(file, "vgrf%d", inst->src[i].reg);
if (virtual_grf_sizes[inst->src[i].reg] != 1 ||
inst->src[i].subreg_offset)
- printf("+%d.%d", inst->src[i].reg_offset,
- inst->src[i].subreg_offset);
+ fprintf(file, "+%d.%d", inst->src[i].reg_offset,
+ inst->src[i].subreg_offset);
break;
case MRF:
- printf("***m%d***", inst->src[i].reg);
+ fprintf(file, "***m%d***", inst->src[i].reg);
break;
case UNIFORM:
- printf("u%d", inst->src[i].reg);
- if (virtual_grf_sizes[inst->src[i].reg] != 1 ||
- inst->src[i].subreg_offset)
- printf("+%d.%d", inst->src[i].reg_offset,
- inst->src[i].subreg_offset);
+ fprintf(file, "u%d", inst->src[i].reg + inst->src[i].reg_offset);
+ if (inst->src[i].reladdr) {
+ fprintf(file, "+reladdr");
+ } else if (virtual_grf_sizes[inst->src[i].reg] != 1 ||
+ inst->src[i].subreg_offset) {
+ fprintf(file, "+%d.%d", inst->src[i].reg_offset,
+ inst->src[i].subreg_offset);
+ }
break;
case BAD_FILE:
- printf("(null)");
+ fprintf(file, "(null)");
break;
case IMM:
switch (inst->src[i].type) {
case BRW_REGISTER_TYPE_F:
- printf("%ff", inst->src[i].imm.f);
+ fprintf(file, "%ff", inst->src[i].fixed_hw_reg.dw1.f);
break;
case BRW_REGISTER_TYPE_D:
- printf("%dd", inst->src[i].imm.i);
+ fprintf(file, "%dd", inst->src[i].fixed_hw_reg.dw1.d);
break;
case BRW_REGISTER_TYPE_UD:
- printf("%uu", inst->src[i].imm.u);
+ fprintf(file, "%uu", inst->src[i].fixed_hw_reg.dw1.ud);
break;
default:
- printf("???");
+ fprintf(file, "???");
break;
}
break;
case HW_REG:
if (inst->src[i].fixed_hw_reg.negate)
- printf("-");
+ fprintf(file, "-");
if (inst->src[i].fixed_hw_reg.abs)
- printf("|");
+ fprintf(file, "|");
if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
switch (inst->src[i].fixed_hw_reg.nr) {
case BRW_ARF_NULL:
- printf("null");
+ fprintf(file, "null");
break;
case BRW_ARF_ADDRESS:
- printf("a0.%d", inst->src[i].fixed_hw_reg.subnr);
+ fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
break;
case BRW_ARF_ACCUMULATOR:
- printf("acc%d", inst->src[i].fixed_hw_reg.subnr);
+ fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
break;
case BRW_ARF_FLAG:
- printf("f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
inst->src[i].fixed_hw_reg.subnr);
break;
default:
- printf("arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
+ fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
inst->src[i].fixed_hw_reg.subnr);
break;
}
} else {
- printf("hw_reg%d", inst->src[i].fixed_hw_reg.nr);
+ fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
}
if (inst->src[i].fixed_hw_reg.subnr)
- printf("+%d", inst->src[i].fixed_hw_reg.subnr);
+ fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
if (inst->src[i].fixed_hw_reg.abs)
- printf("|");
+ fprintf(file, "|");
break;
default:
- printf("???");
+ fprintf(file, "???");
break;
}
if (inst->src[i].abs)
- printf("|");
+ fprintf(file, "|");
if (inst->src[i].file != IMM) {
- printf(":%s", brw_reg_type_letters(inst->src[i].type));
+ fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
}
- if (i < 2 && inst->src[i + 1].file != BAD_FILE)
- printf(", ");
+ if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
+ fprintf(file, ", ");
}
- printf(" ");
+ fprintf(file, " ");
if (inst->force_uncompressed)
- printf("1sthalf ");
+ fprintf(file, "1sthalf ");
if (inst->force_sechalf)
- printf("2ndhalf ");
+ fprintf(file, "2ndhalf ");
- printf("\n");
+ fprintf(file, "\n");
}
/**
fs_inst *
fs_visitor::get_instruction_generating_reg(fs_inst *start,
fs_inst *end,
- fs_reg reg)
+ const fs_reg ®)
{
if (end == start ||
end->is_partial_write() ||
{
bool uses_depth =
(fp->Base.InputsRead & (1 << VARYING_SLOT_POS)) != 0;
- unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
+ unsigned barycentric_interp_modes = prog_data->barycentric_interp_modes;
assert(brw->gen >= 6);
/* R0-1: masks, pixel X/Y coordinates. */
- c->nr_payload_regs = 2;
+ payload.num_regs = 2;
/* R2: only for 32-pixel dispatch.*/
/* R3-26: barycentric interpolation coordinates. These appear in the
*/
for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
if (barycentric_interp_modes & (1 << i)) {
- c->barycentric_coord_reg[i] = c->nr_payload_regs;
- c->nr_payload_regs += 2;
+ payload.barycentric_coord_reg[i] = payload.num_regs;
+ payload.num_regs += 2;
if (dispatch_width == 16) {
- c->nr_payload_regs += 2;
+ payload.num_regs += 2;
}
}
}
/* R27: interpolated depth if uses source depth */
if (uses_depth) {
- c->source_depth_reg = c->nr_payload_regs;
- c->nr_payload_regs++;
+ payload.source_depth_reg = payload.num_regs;
+ payload.num_regs++;
if (dispatch_width == 16) {
/* R28: interpolated depth if not SIMD8. */
- c->nr_payload_regs++;
+ payload.num_regs++;
}
}
/* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
if (uses_depth) {
- c->source_w_reg = c->nr_payload_regs;
- c->nr_payload_regs++;
+ payload.source_w_reg = payload.num_regs;
+ payload.num_regs++;
if (dispatch_width == 16) {
/* R30: interpolated W if not SIMD8. */
- c->nr_payload_regs++;
+ payload.num_regs++;
}
}
- c->prog_data.uses_pos_offset = c->key.compute_pos_offset;
+ prog_data->uses_pos_offset = key->compute_pos_offset;
/* R31: MSAA position offsets. */
- if (c->prog_data.uses_pos_offset) {
- c->sample_pos_reg = c->nr_payload_regs;
- c->nr_payload_regs++;
+ if (prog_data->uses_pos_offset) {
+ payload.sample_pos_reg = payload.num_regs;
+ payload.num_regs++;
}
/* R32: MSAA input coverage mask */
if (fp->Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN) {
assert(brw->gen >= 7);
- c->sample_mask_reg = c->nr_payload_regs;
- c->nr_payload_regs++;
+ payload.sample_mask_in_reg = payload.num_regs;
+ payload.num_regs++;
if (dispatch_width == 16) {
/* R33: input coverage mask if not SIMD8. */
- c->nr_payload_regs++;
+ payload.num_regs++;
}
}
/* R58-59: interp W for 32-pixel. */
if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
- c->source_depth_to_render_target = true;
+ source_depth_to_render_target = true;
}
}
/* If there are no color regions, we still perform an FB write to a null
* renderbuffer, which we place at surface index 0.
*/
- c->prog_data.binding_table.render_target_start = next_binding_table_offset;
- next_binding_table_offset += MAX2(c->key.nr_color_regions, 1);
+ prog_data->binding_table.render_target_start = next_binding_table_offset;
+ next_binding_table_offset += MAX2(key->nr_color_regions, 1);
assign_common_binding_table_offsets(next_binding_table_offset);
}
void
fs_visitor::calculate_register_pressure()
{
+ invalidate_live_intervals();
calculate_live_intervals();
- int num_instructions = 0;
- foreach_list(node, &this->instructions) {
- ++num_instructions;
- }
+ unsigned num_instructions = instructions.length();
regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
}
}
+/**
+ * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
+ *
+ * The needs_unlit_centroid_workaround ends up producing one of these per
+ * channel of centroid input, so it's good to clean them up.
+ *
+ * An assumption here is that nothing ever modifies the dispatched pixels
+ * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
+ * dictates that anyway.
+ */
+void
+fs_visitor::opt_drop_redundant_mov_to_flags()
+{
+ /* One entry per flag subregister (indexed by inst->flag_subreg, which is
+ * 0 or 1 here): true when a FS_OPCODE_MOV_DISPATCH_TO_FLAGS writing that
+ * subregister has been seen with no intervening clobber on this
+ * straight-line path, so a repeat of it is redundant. */
+ bool flag_mov_found[2] = {false};
+
+ /* _safe variant: we may remove() the instruction we are visiting. */
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
+ if (inst->is_control_flow()) {
+ /* Conservatively forget everything at control-flow boundaries: the
+ * flag contents on the paths that merge here are not tracked. */
+ memset(flag_mov_found, 0, sizeof(flag_mov_found));
+ } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
+ if (!flag_mov_found[inst->flag_subreg])
+ flag_mov_found[inst->flag_subreg] = true;
+ else
+ inst->remove();
+ } else if (inst->writes_flag()) {
+ /* Some other instruction wrote this flag subregister, so the next
+ * MOV_DISPATCH_TO_FLAGS targeting it is no longer redundant. */
+ flag_mov_found[inst->flag_subreg] = false;
+ }
+ }
+}
+
bool
fs_visitor::run()
{
sanity_param_count = fp->Base.Parameters->NumParameters;
- uint32_t orig_nr_params = c->prog_data.nr_params;
bool allocated_without_spills;
assign_binding_table_offsets();
/* We handle discards by keeping track of the still-live pixels in f0.1.
* Initialize it with the dispatched pixels.
*/
- if (fp->UsesKill || c->key.alpha_test_func) {
+ if (fp->UsesKill || key->alpha_test_func) {
fs_inst *discard_init = emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
discard_init->flag_subreg = 1;
}
* functions called "main").
*/
if (shader) {
- foreach_list(node, &*shader->base.ir) {
- ir_instruction *ir = (ir_instruction *)node;
+ foreach_in_list(ir_instruction, ir, shader->base.ir) {
base_ir = ir;
this->result = reg_undef;
ir->accept(this);
emit(FS_OPCODE_PLACEHOLDER_HALT);
- if (c->key.alpha_test_func)
+ if (key->alpha_test_func)
emit_alpha_test();
emit_fb_writes();
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
- remove_dead_constants();
- setup_pull_constants();
+ assign_constant_locations();
+ demote_pull_constants();
+
+ opt_drop_redundant_mov_to_flags();
+
+#define OPT(pass, args...) do { \
+ pass_num++; \
+ bool this_progress = pass(args); \
+ \
+ if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
+ char filename[64]; \
+ snprintf(filename, 64, "fs%d-%04d-%02d-%02d-" #pass, \
+ dispatch_width, shader_prog->Name, iteration, pass_num); \
+ \
+ backend_visitor::dump_instructions(filename); \
+ } \
+ \
+ progress = progress || this_progress; \
+ } while (false)
+
+ if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
+ char filename[64];
+ snprintf(filename, 64, "fs%d-%04d-00-start",
+ dispatch_width, shader_prog->Name);
+
+ backend_visitor::dump_instructions(filename);
+ }
bool progress;
+ int iteration = 0;
do {
progress = false;
+ iteration++;
+ int pass_num = 0;
compact_virtual_grfs();
- progress = remove_duplicate_mrf_writes() || progress;
-
- progress = opt_algebraic() || progress;
- progress = opt_cse() || progress;
- progress = opt_copy_propagate() || progress;
- progress = opt_peephole_predicated_break() || progress;
- progress = dead_code_eliminate() || progress;
- progress = dead_code_eliminate_local() || progress;
- progress = opt_peephole_sel() || progress;
- progress = dead_control_flow_eliminate(this) || progress;
- progress = opt_saturate_propagation() || progress;
- progress = register_coalesce() || progress;
- progress = compute_to_mrf() || progress;
+ OPT(remove_duplicate_mrf_writes);
+
+ OPT(opt_algebraic);
+ OPT(opt_cse);
+ OPT(opt_copy_propagate);
+ OPT(opt_peephole_predicated_break);
+ OPT(dead_code_eliminate);
+ OPT(opt_peephole_sel);
+ OPT(dead_control_flow_eliminate, this);
+ OPT(opt_saturate_propagation);
+ OPT(register_coalesce);
+ OPT(compute_to_mrf);
} while (progress);
+ if (lower_load_payload()) {
+ register_coalesce();
+ dead_code_eliminate();
+ }
+
lower_uniform_pull_constant_loads();
assign_curb_setup();
if (dispatch_width == 16) {
fail("Failure to register allocate. Reduce number of "
"live scalar values to avoid this.");
+ } else {
+ perf_debug("Fragment shader triggered register spilling. "
+ "Try reducing the number of live scalar values to "
+ "improve performance.\n");
}
/* Since we're out of heuristics, just go spill registers until we
if (!allocated_without_spills)
schedule_instructions(SCHEDULE_POST);
- if (dispatch_width == 8) {
- c->prog_data.reg_blocks = brw_register_blocks(grf_used);
- } else {
- c->prog_data.reg_blocks_16 = brw_register_blocks(grf_used);
-
- /* Make sure we didn't try to sneak in an extra uniform */
- assert(orig_nr_params == c->prog_data.nr_params);
- (void) orig_nr_params;
+ if (last_scratch > 0) {
+ prog_data->total_scratch = brw_get_scratch_size(last_scratch);
}
+ if (dispatch_width == 8)
+ prog_data->reg_blocks = brw_register_blocks(grf_used);
+ else
+ prog_data->reg_blocks_16 = brw_register_blocks(grf_used);
+
/* If any state parameters were appended, then ParameterValues could have
* been realloced, in which case the driver uniform storage set up by
* _mesa_associate_uniform_storage() would point to freed memory. Make
}
const unsigned *
-brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
+brw_wm_fs_emit(struct brw_context *brw,
+ void *mem_ctx,
+ const struct brw_wm_prog_key *key,
+ struct brw_wm_prog_data *prog_data,
struct gl_fragment_program *fp,
struct gl_shader_program *prog,
unsigned *final_assembly_size)
{
bool start_busy = false;
- float start_time = 0;
+ double start_time = 0;
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
if (prog)
shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- if (prog) {
- printf("GLSL IR for native fragment shader %d:\n", prog->Name);
- _mesa_print_ir(shader->base.ir, NULL);
- printf("\n\n");
- } else {
- printf("ARB_fragment_program %d ir for native fragment shader\n",
- fp->Base.Id);
- _mesa_print_program(&fp->Base);
- }
- }
+ if (unlikely(INTEL_DEBUG & DEBUG_WM))
+ brw_dump_ir(brw, "fragment", prog, &shader->base, &fp->Base);
/* Now the main event: Visit the shader IR and generate our FS IR for it.
*/
- fs_visitor v(brw, c, prog, fp, 8);
+ fs_visitor v(brw, mem_ctx, key, prog_data, prog, fp, 8);
if (!v.run()) {
if (prog) {
prog->LinkStatus = false;
}
exec_list *simd16_instructions = NULL;
- fs_visitor v2(brw, c, prog, fp, 16);
+ fs_visitor v2(brw, mem_ctx, key, prog_data, prog, fp, 16);
if (brw->gen >= 5 && likely(!(INTEL_DEBUG & DEBUG_NO16))) {
- if (c->prog_data.nr_pull_params == 0) {
+ if (!v.simd16_unsupported) {
/* Try a SIMD16 compile */
v2.import_uniforms(&v);
if (!v2.run()) {
simd16_instructions = &v2.instructions;
}
} else {
- perf_debug("Skipping SIMD16 due to pull parameters.\n");
+ perf_debug("SIMD16 shader unsupported, falling back to "
+ "SIMD8 at a 10-20%% performance cost: %s", v.no16_msg);
}
}
const unsigned *assembly = NULL;
if (brw->gen >= 8) {
- gen8_fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
+ gen8_fs_generator g(brw, mem_ctx, key, prog_data, prog, fp, v.do_dual_src);
assembly = g.generate_assembly(&v.instructions, simd16_instructions,
final_assembly_size);
} else {
- fs_generator g(brw, c, prog, fp, v.dual_src_output.file != BAD_FILE);
+ fs_generator g(brw, mem_ctx, key, prog_data, prog, fp, v.do_dual_src,
+ v.runtime_check_aads_emit, INTEL_DEBUG & DEBUG_WM);
assembly = g.generate_assembly(&v.instructions, simd16_instructions,
final_assembly_size);
}
if (unlikely(brw->perf_debug) && shader) {
if (shader->compiled_once)
- brw_wm_debug_recompile(brw, prog, &c->key);
+ brw_wm_debug_recompile(brw, prog, key);
shader->compiled_once = true;
if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
BRW_FS_VARYING_INPUT_MASK) > 16)
key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;
- key.clamp_fragment_color = ctx->API == API_OPENGL_COMPAT;
-
unsigned sampler_count = _mesa_fls(fp->Base.SamplersUsed);
for (unsigned i = 0; i < sampler_count; i++) {
if (fp->Base.ShadowSamplers & (1 << i)) {