static void init_registers( struct brw_wm_compile *c )
{
struct brw_context *brw = c->func.brw;
- GLuint inputs = (brw->vs.prog_data->outputs_written & DO_SETUP_BITS);
+ struct intel_context *intel = &brw->intel;
GLuint nr_interp_regs = 0;
GLuint i = 0;
GLuint j;
for (j = 0; j < c->grf_limit; j++)
c->pass2_grf[j].nextuse = BRW_WM_MAX_INSN;
- for (j = 0; j < c->key.nr_depth_regs; j++)
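+ /* Note: nr_payload_regs counts single hardware GRFs, while each
+ pass2 slot covers a pair of GRFs, so round up to whole pairs. */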
+ for (j = 0; j < (c->nr_payload_regs + 1) / 2; j++)
prealloc_reg(c, &c->payload.depth[j], i++);
for (j = 0; j < c->nr_creg; j++)
prealloc_reg(c, &c->creg[j], i++);
- for (j = 0; j < FRAG_ATTRIB_MAX; j++)
- if (inputs & (1<<j)) {
- /* index for vs output and ps input are not the same
- in shader varying */
- GLuint index;
- if (j > FRAG_ATTRIB_VAR0)
- index = j - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
- else
- index = j;
- nr_interp_regs++;
- prealloc_reg(c, &c->payload.input_interp[index], i++);
+ if (intel->gen >= 6) {
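+ /* Gen6 keys interpolation setup off the FS's own InputsRead and
+ indexes the payload directly by fragment attribute. */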
+ for (unsigned int j = 0; j < FRAG_ATTRIB_MAX; j++) {
+ if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(j)) {
+ nr_interp_regs++;
+ prealloc_reg(c, &c->payload.input_interp[j], i++);
+ }
+ }
+ } else {
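+ /* Pre-Gen6, attributes arrive in VS output order, so map each
+ written vert result to its fragment attribute slot. */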
+ for (j = 0; j < VERT_RESULT_MAX; j++) {
+ if (c->key.vp_outputs_written & BITFIELD64_BIT(j)) {
+ int fp_index = _mesa_vert_result_to_frag_attrib(j);
+
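+ /* A vert result with no FS counterpart (fp_index < 0) still
+ occupies a URB slot, so it counts toward urb_read_length even
+ though it gets no interpolation register. */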
+ nr_interp_regs++;
+ if (fp_index >= 0)
+ prealloc_reg(c, &c->payload.input_interp[fp_index], i++);
+ }
}
+ assert(nr_interp_regs >= 1);
+ }
- assert(nr_interp_regs >= 1);
- c->prog_data.first_curbe_grf = c->key.nr_depth_regs * 2;
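+ /* The CURBE starts in the first GRF after the payload, kept at
+ the same even-register alignment the old nr_depth_regs * 2
+ accounting implied. */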
+ c->prog_data.first_curbe_grf = ALIGN(c->nr_payload_regs, 2);
c->prog_data.urb_read_length = nr_interp_regs * 2;
c->prog_data.curb_read_length = c->nr_creg * 2;
/* Only search those which can change:
*/
if (grf->nextuse < thisinsn) {
- struct brw_wm_ref *ref = grf->value->lastuse;
+ const struct brw_wm_ref *ref = grf->value->lastuse;
/* Has last use of value been passed?
*/
/* Allocate a spill slot. Note that allocations start from 0x40 -
* the first slot is reserved to mean "undef" in brw_wm_emit.c
*/
if (!value->spill_slot) {
c->last_scratch += 0x40;
value->spill_slot = c->last_scratch;
}
if (grf[i+j].nextuse < group_nextuse)
group_nextuse = grf[i+j].nextuse;
}
if (group_nextuse > furthest) {
furthest = group_nextuse;
reg = i;
}
assert(furthest != thisinsn);
/* Any non-empty regs will need to be spilled:
*/
for (j = 0; j < nr; j++)
static void load_args(struct brw_wm_compile *c,
struct brw_wm_instruction *inst)
{
GLuint thisinsn = inst - c->instruction;
GLuint i,j;
* register allocation and mark the ref as requiring a fill.
*/
GLuint reg = search_contiguous_regs(c, 1, thisinsn);
c->pass2_grf[reg].value = ref->value;
c->pass2_grf[reg].nextuse = thisinsn;
ref->value->resident = &c->pass2_grf[reg];
/* Note that a fill is required:
*/
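+ /* A pass2 slot covers two hardware GRFs (a 16-wide value spans a
+ register pair), so scale the slot index to a GRF number. */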
ref->unspill_reg = reg*2;
}
/* Adjust the hw_reg to point at the value's current location:
*/
assert(ref->value == ref->value->resident->value);
for (insn = 0; insn < c->nr_insns; insn++) {
struct brw_wm_instruction *inst = &c->instruction[insn];
/* Update registers' nextuse values:
*/
update_register_usage(c, insn);
break;
}
- if (TEST_DST_SPILLS && inst->opcode != WM_PIXELXY)
+ if (TEST_DST_SPILLS && inst->opcode != WM_PIXELXY) {
for (i = 0; i < 4; i++)
if (inst->dst[i])
spill_value(c, inst->dst[i]);
-
+ }
}
- if (INTEL_DEBUG & DEBUG_WM) {
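+ /* unlikely() marks the debug dump as a cold path. */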
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass2");
}
c->state = PASS2_DONE;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass2/done");
}
}