ref->value = &c->creg[i/16];
ref->insn = 0;
ref->prevuse = NULL;
-
+
return ref;
}
}
*/
c->constref[i].constval = *constval;
c->constref[i].ref = get_param_ref(c, constval);
-
+
return c->constref[i].ref;
}
else {
*/
ref = get_const_ref(c, &plist->ParameterValues[idx][component]);
break;
-
+
case PROGRAM_STATE_VAR:
case PROGRAM_UNIFORM:
/* These may change from run to run:
-
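The fragment above caches one ref per distinct literal constant, so repeated immediates in the program share a single parameter slot, while state vars and uniforms (which, per the comment, may change from run to run) cannot be folded that way. A minimal standalone sketch of that caching idea, with illustrative types and names rather than the driver's own (overflow handling omitted for brevity):

#include <string.h>

/* Hypothetical cache of refs for literal float constants. */
struct toy_ref { unsigned slot; };

#define TOY_MAX_CONSTS 256

static struct { float value; struct toy_ref *ref; } toy_consts[TOY_MAX_CONSTS];
static unsigned toy_nr_consts;

extern struct toy_ref *toy_alloc_param_ref(const float *value);

/* Return the existing ref if this exact bit pattern was seen before,
 * otherwise allocate a new parameter ref and remember it. */
static struct toy_ref *toy_get_const_ref(const float *value)
{
   for (unsigned i = 0; i < toy_nr_consts; i++) {
      if (memcmp(&toy_consts[i].value, value, sizeof *value) == 0)
         return toy_consts[i].ref;
   }
   toy_consts[toy_nr_consts].value = *value;
   toy_consts[toy_nr_consts].ref = toy_alloc_param_ref(value);
   return toy_consts[toy_nr_consts++].ref;
}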
/***********************************************************************
* Straight translation to internal instruction format
*/
static void pass0_set_dst( struct brw_wm_compile *c,
- struct brw_wm_instruction *out,
- const struct prog_instruction *inst,
+ struct brw_wm_instruction *out,
+ const struct prog_instruction *inst,
GLuint writemask )
{
const struct prog_dst_register *dst = &inst->DstReg;
for (i = 0; i < 4; i++) {
if (writemask & (1<<i)) {
out->dst[i] = get_value(c);
-
pass0_set_fpreg_value(c, dst->File, dst->Index, i, out->dst[i]);
}
}
-
+
out->writemask = writemask;
}
static void pass0_set_dst_scalar( struct brw_wm_compile *c,
- struct brw_wm_instruction *out,
- const struct prog_instruction *inst,
+ struct brw_wm_instruction *out,
+ const struct prog_instruction *inst,
GLuint writemask )
{
if (writemask) {
}
-
static const struct brw_wm_ref *get_fp_src_reg_ref( struct brw_wm_compile *c,
struct prog_src_register src,
GLuint i )
static const GLfloat const_zero = 0.0;
static const GLfloat const_one = 1.0;
-
if (component == SWIZZLE_ZERO)
src_ref = get_const_ref(c, &const_zero);
else if (component == SWIZZLE_ONE)
src_ref = get_const_ref(c, &const_one);
else
src_ref = pass0_get_reg(c, src.File, src.Index, component);
-
+
return src_ref;
}
{
const struct brw_wm_ref *ref = get_fp_src_reg_ref(c, src, i);
struct brw_wm_ref *newref = get_ref(c);
-
+
newref->value = ref->value;
newref->hw_reg = ref->hw_reg;
- if (insn) {
+ if (insn) {
newref->insn = insn - c->instruction;
newref->prevuse = newref->value->lastuse;
newref->value->lastuse = newref;
}
- if (src.NegateBase & (1<<i))
+ if (src.NegateBase & (1<<i))
newref->hw_reg.negate ^= 1;
-
+
if (src.Abs) {
newref->hw_reg.negate = 0;
newref->hw_reg.abs = 1;
}
-
static struct brw_wm_instruction *translate_insn( struct brw_wm_compile *c,
const struct prog_instruction *inst )
{
&c->payload.input_interp[i] );
}
+
/***********************************************************************
* PASS 0
*
for (insn = 0; insn < c->nr_fp_insns; insn++) {
const struct prog_instruction *inst = &c->prog_instructions[insn];
-
/* Optimize away moves, otherwise emit translated instruction:
*/
switch (inst->Opcode) {
translate_insn(c, inst);
}
break;
-
-
default:
translate_insn(c, inst);
break;
brw_wm_print_program(c, "pass0");
}
}
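For the "optimize away moves" case noted above, one common approach at this stage is to resolve an unconditional, unsaturated MOV purely in the value-tracking table: each written destination channel is pointed at the source channel's value and no instruction is emitted at all. The sketch below shows that idea in isolation; the types and names are hypothetical stand-ins, not the driver's own, and the real pass may handle more cases than this.

/* Hypothetical per-channel value table: reg_values[reg][chan] names the
 * value currently held in each channel of a virtual register. */
struct toy_value { int id; };

#define TOY_NREGS 8

static struct toy_value *reg_values[TOY_NREGS][4];

/* Coalesce "MOV dst, src" under a writemask: instead of emitting a copy,
 * alias each written destination channel to the source channel's value. */
static void toy_coalesce_mov(int dst, int src, unsigned writemask)
{
   for (int chan = 0; chan < 4; chan++) {
      if (writemask & (1u << chan))
         reg_values[dst][chan] = reg_values[src][chan];
   }
}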
-
if (ref == value->lastuse) {
value->lastuse = ref->prevuse;
- } else {
+ }
+ else {
struct brw_wm_ref *i = value->lastuse;
while (i->prevuse != ref) i = i->prevuse;
i->prevuse = ref->prevuse;
for (i = 0; i < 4; i++) {
struct brw_wm_ref *ref = inst->src[arg][i];
if (ref) {
- if (readmask & (1<<i))
+ if (readmask & (1<<i)) {
ref->value->contributes_to_output = 1;
+ }
else {
unlink_ref(ref);
inst->src[arg][i] = NULL;
static GLuint get_texcoord_mask( GLuint tex_idx )
{
switch (tex_idx) {
- case TEXTURE_1D_INDEX: return WRITEMASK_X;
- case TEXTURE_2D_INDEX: return WRITEMASK_XY;
- case TEXTURE_3D_INDEX: return WRITEMASK_XYZ;
- case TEXTURE_CUBE_INDEX: return WRITEMASK_XYZ;
- case TEXTURE_RECT_INDEX: return WRITEMASK_XY;
+ case TEXTURE_1D_INDEX:
+ return WRITEMASK_X;
+ case TEXTURE_2D_INDEX:
+ return WRITEMASK_XY;
+ case TEXTURE_3D_INDEX:
+ return WRITEMASK_XYZ;
+ case TEXTURE_CUBE_INDEX:
+ return WRITEMASK_XYZ;
+ case TEXTURE_RECT_INDEX:
+ return WRITEMASK_XY;
default: return 0;
}
}
+
/* Step two: Basically this is dead code elimination.
*
* Iterate backwards over instructions, noting which values
brw_wm_print_program(c, "pass1");
}
}
-
-
-
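The (truncated) comment above describes pass1 as a backward dead-code walk: iterate over the instructions in reverse, keep only the written channels whose values are still needed, and within the surviving instructions mark the channels actually read (via readmask, presumably built with get_texcoord_mask() for texture coordinates) as contributing; everything else is unlinked. A simplified standalone sketch of that shape, using plain flags instead of the driver's per-value ref chains (all types and names here are illustrative):

/* Toy backward liveness / dead-code pass over a straight-line program. */
struct toy_value { int contributes_to_output; int has_later_use; };

struct toy_insn {
   unsigned writemask;               /* channels this insn writes         */
   struct toy_value *dst[4];         /* per-channel results               */
   struct toy_value *src[3][4];      /* per-arg, per-channel sources      */
   unsigned read_mask[3];            /* channels each arg is read through */
};

static void toy_dce(struct toy_insn *insns, int nr_insns)
{
   for (int i = nr_insns - 1; i >= 0; i--) {
      struct toy_insn *insn = &insns[i];

      /* Drop written channels whose values are never needed later on. */
      for (int chan = 0; chan < 4; chan++) {
         if ((insn->writemask & (1u << chan)) && insn->dst[chan] &&
             !insn->dst[chan]->has_later_use &&
             !insn->dst[chan]->contributes_to_output)
            insn->writemask &= ~(1u << chan);
      }

      /* If any channel survived, the channels actually read become live. */
      if (!insn->writemask)
         continue;
      for (int arg = 0; arg < 3; arg++) {
         for (int chan = 0; chan < 4; chan++) {
            struct toy_value *v = insn->src[arg][chan];
            if (v && (insn->read_mask[arg] & (1u << chan))) {
               v->contributes_to_output = 1;
               v->has_later_use = 1;
            }
         }
      }
   }
}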
for (j = 0; j < c->nr_creg; j++)
prealloc_reg(c, &c->creg[j], i++);
- for (j = 0; j < FRAG_ATTRIB_MAX; j++)
+ for (j = 0; j < FRAG_ATTRIB_MAX; j++) {
if (inputs & (1<<j)) {
/* The index of a VS output and that of the corresponding PS input
   are not the same for shader varyings. */
nr_interp_regs++;
prealloc_reg(c, &c->payload.input_interp[index], i++);
}
+ }
assert(nr_interp_regs >= 1);
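The loop above hands out interpolation registers only for attributes actually present in the inputs bitmask, packing them densely; as the comment notes, the fragment-side index therefore differs from the VS output index for varyings. A standalone sketch of that kind of compaction, with illustrative names:

/* Toy remap from a sparse attribute bitmask to densely packed payload
 * register indices: only attributes that are actually read get an
 * interpolation register. */
#define TOY_MAX_ATTRIBS 32

static int toy_compact_inputs(unsigned inputs_read,
                              int attrib_to_reg[TOY_MAX_ATTRIBS],
                              int first_reg)
{
   int reg = first_reg;

   for (int attrib = 0; attrib < TOY_MAX_ATTRIBS; attrib++) {
      if (inputs_read & (1u << attrib))
         attrib_to_reg[attrib] = reg++;   /* packed, in attribute order */
      else
         attrib_to_reg[attrib] = -1;      /* not interpolated */
   }
   return reg - first_reg;                /* number of interp regs used */
}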
/* Only search those which can change:
*/
if (grf->nextuse < thisinsn) {
- struct brw_wm_ref *ref = grf->value->lastuse;
+ const struct brw_wm_ref *ref = grf->value->lastuse;
/* Has last use of value been passed?
*/
/* Allocate a spill slot. Note that allocations start from 0x40 -
* the first slot is reserved to mean "undef" in brw_wm_emit.c
*/
- if (!value->spill_slot) {
+ if (!value->spill_slot) {
c->last_scratch += 0x40;
value->spill_slot = c->last_scratch;
}
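In other words, scratch slots are bump-allocated in 0x40-byte units, so the Nth value to be spilled lands at byte offset N * 0x40 and a spill_slot of 0 doubles as the "never spilled / undef" sentinel. A tiny standalone version of the same allocator (illustrative names):

/* Bump-allocate 0x40-byte scratch slots; offset 0 is never handed out,
 * so a zero spill_slot can mean "no slot / undef". */
struct toy_scratch { unsigned last_scratch; };

static unsigned toy_alloc_spill_slot(struct toy_scratch *s)
{
   s->last_scratch += 0x40;      /* 0x40, 0x80, 0xc0, ... */
   return s->last_scratch;
}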
if (grf[i+j].nextuse < group_nextuse)
group_nextuse = grf[i+j].nextuse;
}
-
+
if (group_nextuse > furthest) {
furthest = group_nextuse;
reg = i;
}
assert(furthest != thisinsn);
-
+
/* Any non-empty regs will need to be spilled:
*/
for (j = 0; j < nr; j++)
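The code above is the classic "furthest next use" (Belady-style) victim choice: a group of contiguous registers is scored by the soonest upcoming use among its members, the group whose soonest use lies furthest in the future wins, and whatever still lives in it is spilled. A standalone sketch of the selection step, with illustrative stand-in types:

#define TOY_NR_GRF 16

struct toy_grf { unsigned nextuse; int occupied; };

/* Pick 'nr' contiguous regs whose earliest upcoming use is as far in the
 * future as possible.  Returns the first register of the chosen group,
 * or -1 if nothing qualifies. */
static int toy_search_contiguous_regs(const struct toy_grf *grf, int nr,
                                      unsigned thisinsn)
{
   int best_reg = -1;
   unsigned furthest = thisinsn;   /* never evict something used right now */

   for (int i = 0; i + nr <= TOY_NR_GRF; i++) {
      /* A group is only as good as its soonest-needed member; empty
       * registers count as "needed never". */
      unsigned group_nextuse = ~0u;
      for (int j = 0; j < nr; j++) {
         if (grf[i + j].occupied && grf[i + j].nextuse < group_nextuse)
            group_nextuse = grf[i + j].nextuse;
      }
      if (group_nextuse > furthest) {
         furthest = group_nextuse;
         best_reg = i;
      }
   }
   /* Anything still occupied in the chosen group must be spilled first. */
   return best_reg;
}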
static void load_args(struct brw_wm_compile *c,
struct brw_wm_instruction *inst)
-{
+{
GLuint thisinsn = inst - c->instruction;
GLuint i,j;
* register allocation and mark the ref as requiring a fill.
*/
GLuint reg = search_contiguous_regs(c, 1, thisinsn);
-
+
c->pass2_grf[reg].value = ref->value;
c->pass2_grf[reg].nextuse = thisinsn;
-
+
ref->value->resident = &c->pass2_grf[reg];
/* Note that a fill is required:
*/
ref->unspill_reg = reg*2;
}
-
+
/* Adjust the hw_reg to point at the value's current location:
*/
assert(ref->value == ref->value->resident->value);
for (insn = 0; insn < c->nr_insns; insn++) {
struct brw_wm_instruction *inst = &c->instruction[insn];
-
+
/* Update registers' nextuse values:
*/
update_register_usage(c, insn);
break;
}
- if (TEST_DST_SPILLS && inst->opcode != WM_PIXELXY)
+ if (TEST_DST_SPILLS && inst->opcode != WM_PIXELXY) {
for (i = 0; i < 4; i++)
if (inst->dst[i])
spill_value(c, inst->dst[i]);
-
+ }
}
if (INTEL_DEBUG & DEBUG_WM) {
brw_wm_print_program(c, "pass2/done");
}
}
-
-
-